// SPDX-License-Identifier: GPL-2.0-only
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>
#include <asm/fsgsbase.h>
#include <asm/io_bitmap.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};

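/*
 * Maps a register name to its byte offset in struct pt_regs, for the
 * regs_query_register_*() lookup helpers below.
 */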
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

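/*
 * Return a pointer to the pt_regs slot that backs the given byte offset
 * into struct user_regs_struct; on 32-bit the two layouts line up, so
 * the offset is simply scaled down to a word index (e.g. offset 0 is
 * regs->bx).
 */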
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	if (WARN_ON_ONCE(task == current))
		return -EIO;

	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		fallthrough;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		task_user_gs(task) = value;
	}

	return 0;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	if (WARN_ON_ONCE(task == current))
		return -EIO;

	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * Writes to FS and GS will change the stored selector.  Whether
	 * this changes the segment base as well depends on whether
	 * FSGSBASE is enabled.
	 */

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		task->thread.fsindex = value;
		break;
	case offsetof(struct user_regs_struct,gs):
		task->thread.gsindex = value;
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->cs = value;
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->ss = value;
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

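/*
 * Write one word into a stopped tracee's registers.  @offset is the byte
 * offset into struct user_regs_struct.  Segment registers, the flags word
 * and (on 64-bit) the FS/GS bases need special handling; anything else is
 * stored straight into the child's pt_regs.
 */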
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		x86_fsbase_write_task(child, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		x86_gsbase_write_task(child, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

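/*
 * Read one register value for @task, the counterpart of putreg() above.
 * @offset is the byte offset into struct user_regs_struct.
 */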
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		return x86_fsbase_read_task(task);
	case offsetof(struct user_regs_struct, gs_base):
		return x86_gsbase_read_task(task);
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

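/*
 * regset "get" handler for the general-purpose registers: stream the
 * register file into the caller's membuf one word at a time via getreg().
 */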
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	int reg;

	for (reg = 0; to.left; reg++)
		membuf_store(&to, getreg(target, reg * sizeof(unsigned long)));
	return 0;
}

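/*
 * regset "set" handler for the general-purpose registers: the mirror of
 * genregs_get(), feeding words from a kernel or user buffer to putreg().
 */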
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->virtual_dr6 |= (DR_TRAP0 << i);
}

/*
 * Walk through all ptrace breakpoints for this thread and
 * build the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

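/*
 * Convert the ptrace/DR7 style len and type encoding into the generic
 * attr->bp_len/bp_type values used by the hw_breakpoint core.
 */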
static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
					int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}

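/*
 * Allocate and register a user hardware breakpoint on @tsk with
 * ptrace_triggered() as its handler.  Returns an ERR_PTR() on failure.
 */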
static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
				unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
}

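/*
 * Change the len/type/disabled attributes of an existing breakpoint
 * without touching its address.
 */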
static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
					int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		int index = array_index_nospec(n, HBP_NUM);
		struct perf_event *bp = thread->ptrace_bps[index];

		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		val = thread->virtual_dr6 ^ DR6_RESERVED; /* Flip back to arch polarity */
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}

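/*
 * Handle a write to DR0..DR3: update the address of breakpoint slot @nr,
 * registering a disabled placeholder breakpoint first if the slot is
 * still empty.
 */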
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->virtual_dr6 = val ^ DR6_RESERVED; /* Flip to positive polarity */
		rc = 0;
	} else if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	struct io_bitmap *iobm = target->thread.io_bitmap;

	return iobm ? DIV_ROUND_UP(iobm->max, regset->size) : 0;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      struct membuf to)
{
	struct io_bitmap *iobm = target->thread.io_bitmap;

	if (!iobm)
		return -ENXIO;

	return membuf_write(&to, iobm->bitmap, IO_BITMAP_BYTES);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

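/*
 * Entry point for the x86-specific ptrace requests.  Anything not
 * handled here falls through to the generic ptrace_request().
 */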
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					(struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					(struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl_64(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static int putreg32(struct task_struct *child, unsigned regno, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct pt_regs *regs = task_pt_regs(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) switch (regno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) SEG32(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) SEG32(ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) SEG32(es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * A 32-bit ptracer on a 64-bit kernel expects that writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * FS or GS will also update the base. This is needed for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * operations like PTRACE_SETREGS to fully restore a saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * CPU state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) case offsetof(struct user32, regs.fs):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ret = set_segment_reg(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) offsetof(struct user_regs_struct, fs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) child->thread.fsbase =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) x86_fsgsbase_read_task(child, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case offsetof(struct user32, regs.gs):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ret = set_segment_reg(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) offsetof(struct user_regs_struct, gs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) child->thread.gsbase =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) x86_fsgsbase_read_task(child, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) SEG32(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) R32(ebx, bx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) R32(ecx, cx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) R32(edx, dx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) R32(edi, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) R32(esi, si);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) R32(ebp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) R32(eax, ax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) R32(eip, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) R32(esp, sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) case offsetof(struct user32, regs.orig_eax):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * Warning: bizarre corner case fixup here. A 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * debugger setting orig_eax to -1 wants to disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * syscall restart. Make sure that the syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * restart code sign-extends orig_ax. Also make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * we interpret the -ERESTART* codes correctly if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * loaded into regs->ax in case the task is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * actually still sitting at the exit from a 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * syscall with TS_COMPAT still set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) regs->orig_ax = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (syscall_get_nr(child, regs) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) child->thread_info.status |= TS_I386_REGS_POKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) case offsetof(struct user32, regs.eflags):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return set_flags(child, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) case offsetof(struct user32, u_debugreg[0]) ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) offsetof(struct user32, u_debugreg[7]):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) regno -= offsetof(struct user32, u_debugreg[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return ptrace_set_debugreg(child, regno / 4, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (regno > sizeof(struct user32) || (regno & 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * Other dummy fields in the virtual user structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * are ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
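/*
 * Illustrative user-space sketch (not part of this file): the orig_eax
 * corner case handled above is what lets a 32-bit debugger cancel a
 * pending system call (and its restart) from a ptrace stop, roughly:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, regs.orig_eax), (void *)-1L);
 *
 * The sign extension noted above is what keeps the syscall-restart logic
 * treating the poked value as -1 rather than 0xffffffff.
 */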
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) #undef R32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) #undef SEG32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) #define R32(l,q) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) case offsetof(struct user32, regs.l): \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *val = regs->q; break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) #define SEG32(rs) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) case offsetof(struct user32, regs.rs): \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *val = get_segment_reg(child, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) offsetof(struct user_regs_struct, rs)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct pt_regs *regs = task_pt_regs(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) switch (regno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) SEG32(ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) SEG32(es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) SEG32(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) SEG32(gs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) R32(cs, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) R32(ss, ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) R32(ebx, bx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) R32(ecx, cx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) R32(edx, dx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) R32(edi, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) R32(esi, si);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) R32(ebp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) R32(eax, ax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) R32(orig_eax, orig_ax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) R32(eip, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) R32(esp, sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) case offsetof(struct user32, regs.eflags):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) *val = get_flags(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) case offsetof(struct user32, u_debugreg[0]) ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) offsetof(struct user32, u_debugreg[7]):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) regno -= offsetof(struct user32, u_debugreg[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *val = ptrace_get_debugreg(child, regno / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (regno > sizeof(struct user32) || (regno & 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Other dummy fields in the virtual user structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * are ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) #undef R32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) #undef SEG32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
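/*
 * The 32-bit REGSET_GENERAL view is assembled one 32-bit word at a time:
 * genregs32_get() walks the user_regs_struct32 layout via getreg32(), and
 * genregs32_set() feeds each incoming word to putreg32().
 */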
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static int genregs32_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) for (reg = 0; to.left; reg++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) getreg32(target, reg * 4, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) membuf_store(&to, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static int genregs32_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (kbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) const compat_ulong_t *k = kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) while (count >= sizeof(*k) && !ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ret = putreg32(target, pos, *k++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) count -= sizeof(*k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) pos += sizeof(*k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) const compat_ulong_t __user *u = ubuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) while (count >= sizeof(*u) && !ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) compat_ulong_t word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ret = __get_user(word, u++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ret = putreg32(target, pos, word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) count -= sizeof(*u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) pos += sizeof(*u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) compat_ulong_t caddr, compat_ulong_t cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) unsigned long addr = caddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) unsigned long data = cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) void __user *datap = compat_ptr(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) __u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) switch (request) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case PTRACE_PEEKUSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ret = getreg32(child, addr, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ret = put_user(val, (__u32 __user *)datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case PTRACE_POKEUSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ret = putreg32(child, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case PTRACE_GETREGS: /* Get all gp regs from the child. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return copy_regset_to_user(child, &user_x86_32_view,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) REGSET_GENERAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 0, sizeof(struct user_regs_struct32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) case PTRACE_SETREGS: /* Set all gp regs in the child. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return copy_regset_from_user(child, &user_x86_32_view,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) REGSET_GENERAL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) sizeof(struct user_regs_struct32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) case PTRACE_GETFPREGS: /* Get the child FPU state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return copy_regset_to_user(child, &user_x86_32_view,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) REGSET_FP, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) sizeof(struct user_i387_ia32_struct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) case PTRACE_SETFPREGS: /* Set the child FPU state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return copy_regset_from_user(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) child, &user_x86_32_view, REGSET_FP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 0, sizeof(struct user_i387_ia32_struct), datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return copy_regset_to_user(child, &user_x86_32_view,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) REGSET_XFP, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) sizeof(struct user32_fxsr_struct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return copy_regset_from_user(child, &user_x86_32_view,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) REGSET_XFP, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) sizeof(struct user32_fxsr_struct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) case PTRACE_GET_THREAD_AREA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) case PTRACE_SET_THREAD_AREA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return arch_ptrace(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return compat_ptrace_request(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
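/*
 * Illustrative user-space sketch (not part of this file): from a 32-bit
 * tracer, the compat path above serves the classic fixed-layout requests.
 * For example, dumping the general registers and reading DR6:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *
 *	struct user_regs_struct regs;
 *
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *	ptrace(PTRACE_PEEKUSER, pid,
 *	       offsetof(struct user, u_debugreg[6]), NULL);
 *
 * (With 32-bit headers, struct user matches the user32 layout used above;
 * PTRACE_PEEKUSER returns the value as the ptrace() return value.)
 */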
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) #endif /* CONFIG_IA32_EMULATION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) #ifdef CONFIG_X86_X32_ABI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static long x32_arch_ptrace(struct task_struct *child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) compat_long_t request, compat_ulong_t caddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) compat_ulong_t cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) unsigned long addr = caddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) unsigned long data = cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) void __user *datap = compat_ptr(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) switch (request) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* Read 32 bits at location addr in the USER area.  Only the lower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 32 bits of the segment and debug registers can be returned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) case PTRACE_PEEKUSR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) addr < offsetof(struct user_regs_struct, cs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) tmp = 0; /* Default return condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (addr < sizeof(struct user_regs_struct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) tmp = getreg(child, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) else if (addr >= offsetof(struct user, u_debugreg[0]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) addr <= offsetof(struct user, u_debugreg[7])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) addr -= offsetof(struct user, u_debugreg[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) tmp = ptrace_get_debugreg(child, addr / sizeof(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) ret = put_user(tmp, (__u32 __user *)datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* Write the word at location addr in the USER area.  Only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) segment and debug registers can be updated, with the value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) zero-extended in the upper 32 bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) case PTRACE_POKEUSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) addr < offsetof(struct user_regs_struct, cs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (addr < sizeof(struct user_regs_struct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ret = putreg(child, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) else if (addr >= offsetof(struct user, u_debugreg[0]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) addr <= offsetof(struct user, u_debugreg[7])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) addr -= offsetof(struct user, u_debugreg[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ret = ptrace_set_debugreg(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) addr / sizeof(data), data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) case PTRACE_GETREGS: /* Get all gp regs from the child. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return copy_regset_to_user(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) task_user_regset_view(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) REGSET_GENERAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 0, sizeof(struct user_regs_struct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) case PTRACE_SETREGS: /* Set all gp regs in the child. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return copy_regset_from_user(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) task_user_regset_view(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) REGSET_GENERAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 0, sizeof(struct user_regs_struct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) case PTRACE_GETFPREGS: /* Get the child FPU state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return copy_regset_to_user(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) task_user_regset_view(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) REGSET_FP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 0, sizeof(struct user_i387_struct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) case PTRACE_SETFPREGS: /* Set the child FPU state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return copy_regset_from_user(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) task_user_regset_view(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) REGSET_FP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 0, sizeof(struct user_i387_struct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return compat_ptrace_request(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) compat_ulong_t caddr, compat_ulong_t cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) #ifdef CONFIG_X86_X32_ABI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (!in_ia32_syscall())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return x32_arch_ptrace(child, request, caddr, cdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) #ifdef CONFIG_IA32_EMULATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return ia32_arch_ptrace(child, request, caddr, cdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) #endif /* CONFIG_COMPAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static struct user_regset x86_64_regsets[] __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) [REGSET_GENERAL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) .core_note_type = NT_PRSTATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .n = sizeof(struct user_regs_struct) / sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) .size = sizeof(long), .align = sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) .regset_get = genregs_get, .set = genregs_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) [REGSET_FP] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .core_note_type = NT_PRFPREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) .n = sizeof(struct user_i387_struct) / sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) .size = sizeof(long), .align = sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) [REGSET_XSTATE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) .core_note_type = NT_X86_XSTATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) .size = sizeof(u64), .align = sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) .active = xstateregs_active, .regset_get = xstateregs_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) .set = xstateregs_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) [REGSET_IOPERM64] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) .core_note_type = NT_386_IOPERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) .n = IO_BITMAP_LONGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) .size = sizeof(long), .align = sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) .active = ioperm_active, .regset_get = ioperm_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static const struct user_regset_view user_x86_64_view = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) .name = "x86_64", .e_machine = EM_X86_64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) #else /* CONFIG_X86_32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #define user_regs_struct32 user_regs_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) #define genregs32_get genregs_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #define genregs32_set genregs_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) #endif /* CONFIG_X86_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) static struct user_regset x86_32_regsets[] __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) [REGSET_GENERAL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) .core_note_type = NT_PRSTATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) .n = sizeof(struct user_regs_struct32) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) .size = sizeof(u32), .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) .regset_get = genregs32_get, .set = genregs32_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) [REGSET_FP] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) .core_note_type = NT_PRFPREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .size = sizeof(u32), .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) .active = regset_fpregs_active, .regset_get = fpregs_get, .set = fpregs_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) [REGSET_XFP] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) .core_note_type = NT_PRXFPREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) .size = sizeof(u32), .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) [REGSET_XSTATE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) .core_note_type = NT_X86_XSTATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) .size = sizeof(u64), .align = sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) .active = xstateregs_active, .regset_get = xstateregs_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) .set = xstateregs_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) [REGSET_TLS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) .core_note_type = NT_386_TLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) .size = sizeof(struct user_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) .align = sizeof(struct user_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .active = regset_tls_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .regset_get = regset_tls_get, .set = regset_tls_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) [REGSET_IOPERM32] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) .core_note_type = NT_386_IOPERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) .n = IO_BITMAP_BYTES / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .size = sizeof(u32), .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) .active = ioperm_active, .regset_get = ioperm_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static const struct user_regset_view user_x86_32_view = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) .name = "i386", .e_machine = EM_386,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) #endif
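/*
 * Illustrative user-space sketch (not part of this file): every regset in
 * the tables above is also reachable generically through PTRACE_GETREGSET
 * and PTRACE_SETREGSET, keyed by its core_note_type.  For example,
 * NT_PRSTATUS returns the same data as PTRACE_GETREGS, and NT_X86_XSTATE
 * dumps the XSAVE area described by REGSET_XSTATE:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <elf.h>
 *
 *	struct user_regs_struct gpr;
 *	struct iovec iov = { .iov_base = &gpr, .iov_len = sizeof(gpr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * On return the kernel trims iov.iov_len to the number of bytes written.
 */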
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * This represents bytes 464..511 in the memory layout exported through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * the REGSET_XSTATE interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
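/*
 * update_regset_xstate_info() below sizes the dynamic REGSET_XSTATE
 * entries and records the exported xfeature mask in the XCR0 word of
 * this block, so consumers of the exported buffer can see which
 * features it describes.
 */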
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
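/*
 * With IA32_EMULATION the choice follows the given task's current mode
 * (user_64bit_mode()): 32-bit mode selects the "i386" view, 64-bit mode
 * the "x86_64" view.
 */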
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) const struct user_regset_view *task_user_regset_view(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) #ifdef CONFIG_IA32_EMULATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (!user_64bit_mode(task_pt_regs(task)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return &user_x86_32_view;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return &user_x86_64_view;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct task_struct *tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) tsk->thread.trap_nr = X86_TRAP_DB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) tsk->thread.error_code = error_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /* Send us the fake SIGTRAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) force_sig_fault(SIGTRAP, si_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) user_mode(regs) ? (void __user *)regs->ip : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) void user_single_step_report(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) send_sigtrap(regs, 0, TRAP_BRKPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
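/*
 * Illustrative user-space sketch (not part of this file): the SIGTRAP
 * raised above is what a tracer observes after PTRACE_SINGLESTEP; the
 * si_code delivered with it can be read back to tell trap sources apart:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	siginfo_t si;
 *	int status;
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
 *
 * Here si.si_signo is SIGTRAP and si.si_code identifies the trap.
 */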