// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
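
/*
 * Example (hypothetical caller, not part of this file): a probe handler
 * can resolve a register by name once and then read it through the
 * regs_get_register() accessor from <asm/ptrace.h>:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */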

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @addr: address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
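
/*
 * Example (hypothetical, for illustration only): kprobe-based trace events
 * use this helper to fetch "$stackN" arguments, e.g. reading stack slot 3
 * at the probed instruction:
 *
 *	unsigned long arg = regs_get_kernel_stack_nth(regs, 3);
 */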

/*
 * TODO: does not yet catch signals sent when the child dies
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

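		/*
		 * For compat tracees the triggering slot is encoded in
		 * si_errno: breakpoint slot i is reported as (i << 1) + 1,
		 * watchpoint slot i as the negated value, so the debugger
		 * can tell the two register sets apart.
		 */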
		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		/* Don't also queue the native SIGTRAP below. */
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

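/*
 * Called from the arch copy_thread() path: hardware breakpoints are
 * deliberately not inherited, so the child starts with an empty debug state.
 */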
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

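/*
 * Contract used throughout this file: ptrace_hbp_get_event() returns
 * ERR_PTR(-EINVAL) for an out-of-range index, NULL for a valid but
 * not-yet-populated slot, and a valid perf_event pointer otherwise.
 */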
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

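/*
 * The resource-info word reported to userspace for NT_ARM_HW_BREAK and
 * NT_ARM_HW_WATCH packs the debug architecture version into bits [15:8]
 * and the number of available slots into bits [7:0], as assembled below.
 */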
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

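/*
 * Each register slot in struct user_hwdebug_state is laid out as
 * { u64 addr; u32 ctrl; u32 pad }; the sizes below mirror that layout
 * and drive the copy loops in hw_break_get()/hw_break_set().
 */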
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}
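
/*
 * Example (hypothetical tracer-side usage, not part of the kernel): the
 * regsets above are reached from userspace via PTRACE_GETREGSET and
 * PTRACE_SETREGSET with an iovec, e.g.:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */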

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (target == current)
		tls_preserve_current_state();

	return membuf_store(&to, target->thread.uw.tp_value);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}
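
/*
 * Note: thread.uw.tp_value is the saved user TLS register (TPIDR_EL0 for
 * native tasks), so the NT_ARM_TLS regset above reads and writes the value
 * the tracee will see in its thread pointer.
 */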

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
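
/*
 * Example (hypothetical tracer-side usage): NT_ARM_SYSTEM_CALL is how a
 * tracer rewrites the syscall number at a syscall-entry stop; writing -1
 * should cause the syscall to be skipped:
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */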

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

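/*
 * Layout of the NT_ARM_SVE regset payload handled below (offsets are the
 * SVE_PT_* macros from <asm/ptrace.h>):
 *
 *	[ user_sve_header     ]
 *	[ Z-regs, P-regs, FFR ]  from thread.sve_state
 *	[ zero padding        ]  up to SVE_PT_SVE_FPSR_OFFSET(vq)
 *	[ FPSR, FPCR          ]  contiguous in thread.uw.fpsimd_state
 *	[ zero padding        ]  up to ALIGN(header.size, SVE_VQ_BYTES)
 *
 * When the header selects SVE_PT_REGS_FPSIMD, the header is instead
 * followed by a plain user_fpsimd_state, as in the NT_PRFPREG regset.
 */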
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) static int sve_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct user_sve_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) unsigned int vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (!system_supports_sve())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* Header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) sve_init_header_from_task(&header, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) vq = sve_vq_from_vl(header.vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) membuf_write(&to, &header, sizeof(header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (target == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) fpsimd_preserve_current_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* Registers: FPSIMD-only case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return __fpr_get(target, regset, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* Otherwise: full SVE case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) start = SVE_PT_SVE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) membuf_write(&to, target->thread.sve_state, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) start = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) end = SVE_PT_SVE_FPSR_OFFSET(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) membuf_zero(&to, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * Copy fpsr, and fpcr which must follow contiguously in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * struct fpsimd_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) start = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) start = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) end = sve_size_from_header(&header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return membuf_zero(&to, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) static int sve_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct user_sve_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) unsigned int vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!system_supports_sve())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /* Header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (count < sizeof(header))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 0, sizeof(header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * sve_set_vector_length(), which will also validate them for us:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ret = sve_set_vector_length(target, header.vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Actual VL set may be less than the user asked for: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) vq = sve_vq_from_vl(target->thread.sve_vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* Registers: FPSIMD-only case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) SVE_PT_FPSIMD_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) clear_tsk_thread_flag(target, TIF_SVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* Otherwise: full SVE case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * If setting a different VL from the requested VL and there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * register data, the data layout will be wrong: don't even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * try to set the registers in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (count && vq != sve_vq_from_vl(header.vl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
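/* Make sure the target's sve_state storage exists before copying into it. */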
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) sve_alloc(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * Ensure target->thread.sve_state is up to date with target's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * FPSIMD regs, so that a short copyin leaves trailing registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * unmodified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) fpsimd_sync_to_sve(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) set_tsk_thread_flag(target, TIF_SVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
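/*
 * The user layout from SVE_PT_SVE_OFFSET up to the end of FFR matches
 * thread.sve_state, so the Z, P and FFR registers can be copied in
 * directly.
 */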
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) start = SVE_PT_SVE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) target->thread.sve_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
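/* Ignore any data in the hole between the end of FFR and fpsr: */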
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) start = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) end = SVE_PT_SVE_FPSR_OFFSET(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * Copy fpsr, and fpcr, which must follow it contiguously in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * struct fpsimd_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) start = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) &target->thread.uw.fpsimd_state.fpsr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) fpsimd_flush_task_state(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) #endif /* CONFIG_ARM64_SVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) #ifdef CONFIG_ARM64_PTR_AUTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static int pac_mask_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * The PAC bits can differ across data and instruction pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * depending on TCR_EL1.TBID*, which we may make use of in future, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * we expose separate masks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) unsigned long mask = ptrauth_user_pac_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct user_pac_mask uregs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) .data_mask = mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) .insn_mask = mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!system_supports_address_auth())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return membuf_write(&to, &uregs, sizeof(uregs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) static int pac_enabled_keys_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) long enabled_keys = ptrauth_get_enabled_keys(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (IS_ERR_VALUE(enabled_keys))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return enabled_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static int pac_enabled_keys_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) long enabled_keys = ptrauth_get_enabled_keys(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (IS_ERR_VALUE(enabled_keys))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return enabled_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
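/* Start from the current mask so a partial copyin only changes the bytes actually supplied. */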
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) enabled_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) #ifdef CONFIG_CHECKPOINT_RESTORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return (__uint128_t)key->hi << 64 | key->lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct ptrauth_key key = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .lo = (unsigned long)ukey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) .hi = (unsigned long)(ukey >> 64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) const struct ptrauth_keys_user *keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ukeys->apiakey = pac_key_to_user(&keys->apia);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ukeys->apibkey = pac_key_to_user(&keys->apib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ukeys->apdakey = pac_key_to_user(&keys->apda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ukeys->apdbkey = pac_key_to_user(&keys->apdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) const struct user_pac_address_keys *ukeys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) keys->apia = pac_key_from_user(ukeys->apiakey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) keys->apib = pac_key_from_user(ukeys->apibkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) keys->apda = pac_key_from_user(ukeys->apdakey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) keys->apdb = pac_key_from_user(ukeys->apdbkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static int pac_address_keys_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct ptrauth_keys_user *keys = &target->thread.keys_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct user_pac_address_keys user_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (!system_supports_address_auth())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) pac_address_keys_to_user(&user_keys, keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return membuf_write(&to, &user_keys, sizeof(user_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static int pac_address_keys_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct ptrauth_keys_user *keys = &target->thread.keys_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct user_pac_address_keys user_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (!system_supports_address_auth())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
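/* Convert the current keys first so a partial copyin only overwrites the bytes actually supplied. */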
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) pac_address_keys_to_user(&user_keys, keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) &user_keys, 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) pac_address_keys_from_user(keys, &user_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) const struct ptrauth_keys_user *keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ukeys->apgakey = pac_key_to_user(&keys->apga);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) const struct user_pac_generic_keys *ukeys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) keys->apga = pac_key_from_user(ukeys->apgakey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static int pac_generic_keys_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct ptrauth_keys_user *keys = &target->thread.keys_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct user_pac_generic_keys user_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (!system_supports_generic_auth())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) pac_generic_keys_to_user(&user_keys, keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return membuf_write(&to, &user_keys, sizeof(user_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int pac_generic_keys_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct ptrauth_keys_user *keys = &target->thread.keys_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct user_pac_generic_keys user_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!system_supports_generic_auth())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) pac_generic_keys_to_user(&user_keys, keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) &user_keys, 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) pac_generic_keys_from_user(keys, &user_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) #endif /* CONFIG_CHECKPOINT_RESTORE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) #endif /* CONFIG_ARM64_PTR_AUTH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static int tagged_addr_ctrl_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) long ctrl = get_tagged_addr_ctrl(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (IS_ERR_VALUE(ctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return membuf_write(&to, &ctrl, sizeof(ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static int tagged_addr_ctrl_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) long ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return set_tagged_addr_ctrl(target, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) enum aarch64_regset {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) REGSET_GPR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) REGSET_FPR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) REGSET_TLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #ifdef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) REGSET_HW_BREAK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) REGSET_HW_WATCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) REGSET_SYSTEM_CALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) #ifdef CONFIG_ARM64_SVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) REGSET_SVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #ifdef CONFIG_ARM64_PTR_AUTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) REGSET_PAC_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) REGSET_PAC_ENABLED_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) #ifdef CONFIG_CHECKPOINT_RESTORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) REGSET_PACA_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) REGSET_PACG_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) REGSET_TAGGED_ADDR_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static const struct user_regset aarch64_regsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) [REGSET_GPR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .core_note_type = NT_PRSTATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) .n = sizeof(struct user_pt_regs) / sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) .size = sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .align = sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .regset_get = gpr_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) .set = gpr_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) [REGSET_FPR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .core_note_type = NT_PRFPREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * We pretend we have 32-bit registers because the fpsr and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * fpcr are 32 bits wide.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .size = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .active = fpr_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) .regset_get = fpr_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) .set = fpr_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) [REGSET_TLS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) .core_note_type = NT_ARM_TLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) .n = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) .size = sizeof(void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .align = sizeof(void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) .regset_get = tls_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) .set = tls_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) #ifdef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) [REGSET_HW_BREAK] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .core_note_type = NT_ARM_HW_BREAK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) .size = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .regset_get = hw_break_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .set = hw_break_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) [REGSET_HW_WATCH] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) .core_note_type = NT_ARM_HW_WATCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) .size = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) .regset_get = hw_break_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) .set = hw_break_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) [REGSET_SYSTEM_CALL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) .core_note_type = NT_ARM_SYSTEM_CALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) .n = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) .size = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) .align = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) .regset_get = system_call_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) .set = system_call_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) #ifdef CONFIG_ARM64_SVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) [REGSET_SVE] = { /* Scalable Vector Extension */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) .core_note_type = NT_ARM_SVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) SVE_VQ_BYTES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .size = SVE_VQ_BYTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) .align = SVE_VQ_BYTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) .regset_get = sve_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) .set = sve_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) #ifdef CONFIG_ARM64_PTR_AUTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) [REGSET_PAC_MASK] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) .core_note_type = NT_ARM_PAC_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) .n = sizeof(struct user_pac_mask) / sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) .size = sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) .align = sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) .regset_get = pac_mask_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* this cannot be set dynamically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) [REGSET_PAC_ENABLED_KEYS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) .core_note_type = NT_ARM_PAC_ENABLED_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) .n = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) .size = sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) .align = sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) .regset_get = pac_enabled_keys_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) .set = pac_enabled_keys_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) #ifdef CONFIG_CHECKPOINT_RESTORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) [REGSET_PACA_KEYS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .core_note_type = NT_ARM_PACA_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) .size = sizeof(__uint128_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) .align = sizeof(__uint128_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) .regset_get = pac_address_keys_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .set = pac_address_keys_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) [REGSET_PACG_KEYS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) .core_note_type = NT_ARM_PACG_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .size = sizeof(__uint128_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) .align = sizeof(__uint128_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) .regset_get = pac_generic_keys_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .set = pac_generic_keys_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) [REGSET_TAGGED_ADDR_CTRL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) .core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) .n = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) .size = sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) .align = sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) .regset_get = tagged_addr_ctrl_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) .set = tagged_addr_ctrl_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static const struct user_regset_view user_aarch64_view = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) .name = "aarch64", .e_machine = EM_AARCH64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) enum compat_regset {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) REGSET_COMPAT_GPR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) REGSET_COMPAT_VFP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
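/*
 * The virtual register numbers follow the AArch32 ptrace layout:
 * 0-14 are r0-r14, 15 is the pc, 16 the cpsr and 17 orig_r0.
 */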
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct pt_regs *regs = task_pt_regs(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return regs->pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return pstate_to_compat_psr(regs->pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) case 17:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return regs->orig_x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return regs->regs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) static int compat_gpr_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
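/* Stream the registers out until the caller's buffer is exhausted. */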
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) while (to.left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) membuf_store(&to, compat_get_user_reg(target, i++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static int compat_gpr_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct pt_regs newregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) unsigned int i, start, num_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /* Calculate the number of AArch32 registers contained in count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) num_regs = count / regset->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /* Convert pos into a register number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) start = pos / regset->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (start + num_regs > regset->n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
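/* Build the update in a local copy: it is only committed below if the resulting register state validates. */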
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) newregs = *task_pt_regs(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) for (i = 0; i < num_regs; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) unsigned int idx = start + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) compat_ulong_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (kbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) memcpy(&reg, kbuf, sizeof(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) kbuf += sizeof(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ret = copy_from_user(&reg, ubuf, sizeof(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ubuf += sizeof(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) newregs.pc = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) reg = compat_psr_to_pstate(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) newregs.pstate = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) case 17:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) newregs.orig_x0 = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) newregs.regs[idx] = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (valid_user_regs(&newregs.user_regs, target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) *task_pt_regs(target) = newregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static int compat_vfp_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct user_fpsimd_state *uregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) compat_ulong_t fpscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (!system_supports_fpsimd())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) uregs = &target->thread.uw.fpsimd_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
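/* If reading the current task, make sure the saved FP state is up to date first. */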
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (target == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) fpsimd_preserve_current_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * The VFP registers are packed into the fpsimd_state, so they all sit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * nicely together for us. We just need to create the fpscr separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return membuf_store(&to, fpscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int compat_vfp_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct user_fpsimd_state *uregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) compat_ulong_t fpscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int ret, vregs_end_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (!system_supports_fpsimd())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) uregs = &target->thread.uw.fpsimd_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
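/*
 * As in compat_vfp_get, the VFP registers sit together at the start
 * of fpsimd_state; copy them first, then split the trailing fpscr
 * word into the native fpsr and fpcr.
 */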
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) vregs_end_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (count && !ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) vregs_end_pos, VFP_STATE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) fpsimd_flush_task_state(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) static int compat_tls_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static int compat_tls_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) const struct user_regset *regset, unsigned int pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) unsigned int count, const void *kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) compat_ulong_t tls = target->thread.uw.tp_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) target->thread.uw.tp_value = tls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static const struct user_regset aarch32_regsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) [REGSET_COMPAT_GPR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) .core_note_type = NT_PRSTATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) .n = COMPAT_ELF_NGREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) .size = sizeof(compat_elf_greg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) .align = sizeof(compat_elf_greg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) .regset_get = compat_gpr_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) .set = compat_gpr_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) [REGSET_COMPAT_VFP] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) .core_note_type = NT_ARM_VFP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) .size = sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) .align = sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) .active = fpr_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) .regset_get = compat_vfp_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) .set = compat_vfp_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static const struct user_regset_view user_aarch32_view = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) .name = "aarch32", .e_machine = EM_ARM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static const struct user_regset aarch32_ptrace_regsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) [REGSET_GPR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) .core_note_type = NT_PRSTATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) .n = COMPAT_ELF_NGREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) .size = sizeof(compat_elf_greg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) .align = sizeof(compat_elf_greg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) .regset_get = compat_gpr_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) .set = compat_gpr_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) [REGSET_FPR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) .core_note_type = NT_ARM_VFP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) .size = sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) .align = sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) .regset_get = compat_vfp_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) .set = compat_vfp_set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) [REGSET_TLS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) .core_note_type = NT_ARM_TLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) .n = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) .size = sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) .align = sizeof(compat_ulong_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) .regset_get = compat_tls_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) .set = compat_tls_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) #ifdef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) [REGSET_HW_BREAK] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) .core_note_type = NT_ARM_HW_BREAK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) .size = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) .regset_get = hw_break_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) .set = hw_break_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) [REGSET_HW_WATCH] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) .core_note_type = NT_ARM_HW_WATCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) .size = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) .align = sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) .regset_get = hw_break_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) .set = hw_break_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) [REGSET_SYSTEM_CALL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) .core_note_type = NT_ARM_SYSTEM_CALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) .n = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) .size = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) .align = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) .regset_get = system_call_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) .set = system_call_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static const struct user_regset_view user_aarch32_ptrace_view = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) .name = "aarch32", .e_machine = EM_ARM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) compat_ulong_t __user *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) compat_ulong_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (off & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
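/*
 * The magic text/data address offsets are handled first; offsets
 * inside the GPR area read the registers, the rest of the user area
 * reads as zero, and anything beyond it is an error.
 */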
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (off == COMPAT_PT_TEXT_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) tmp = tsk->mm->start_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) else if (off == COMPAT_PT_DATA_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) tmp = tsk->mm->start_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) else if (off == COMPAT_PT_TEXT_END_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) tmp = tsk->mm->end_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) else if (off < sizeof(compat_elf_gregset_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) tmp = compat_get_user_reg(tsk, off >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) else if (off >= COMPAT_USER_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return put_user(tmp, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) compat_ulong_t val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct pt_regs newregs = *task_pt_regs(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) unsigned int idx = off / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (off & 3 || off >= COMPAT_USER_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
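/* Writes to the user area beyond the GPRs are accepted but ignored. */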
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (off >= sizeof(compat_elf_gregset_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) newregs.pc = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) newregs.pstate = compat_psr_to_pstate(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) case 17:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) newregs.orig_x0 = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) newregs.regs[idx] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (!valid_user_regs(&newregs.user_regs, tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) *task_pt_regs(tsk) = newregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) #ifdef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * Convert a virtual register number into an index for a thread_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * breakpoint array. Breakpoints are identified using positive numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * whilst watchpoints are negative. The registers are laid out as pairs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * of (address, control), each pair mapping to a unique hw_breakpoint struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * Register 0 is reserved for describing resource information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
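/* e.g. nums 1/2 -> idx 0 (addr/ctrl), nums 3/4 -> idx 1; likewise for negative watchpoint numbers. */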
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return (abs(num) - 1) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) u8 num_brps, num_wrps, debug_arch, wp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) u32 reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) num_brps = hw_breakpoint_slots(TYPE_INST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) num_wrps = hw_breakpoint_slots(TYPE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
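/*
 * Pack the info word as debug_arch[31:24] | wp_len[23:16] |
 * num_wrps[15:8] | num_brps[7:0]; the watchpoint length is
 * reported as a fixed 8 bytes.
 */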
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) debug_arch = debug_monitors_arch();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) wp_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) reg |= debug_arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) reg |= wp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) reg |= num_wrps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) reg |= num_brps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) *kdata = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static int compat_ptrace_hbp_get(unsigned int note_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) compat_long_t num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) u32 *kdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) u64 addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) u32 ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) int err, idx = compat_ptrace_hbp_num_to_idx(num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (num & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) *kdata = (u32)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) *kdata = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static int compat_ptrace_hbp_set(unsigned int note_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) compat_long_t num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) u32 *kdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int err, idx = compat_ptrace_hbp_num_to_idx(num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (num & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) addr = *kdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) ctrl = *kdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
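
/*
 * For reference (assumed background, not code in this file): the
 * COMPAT_PTRACE_* requests above reuse the numeric values of the native
 * arch/arm requests, so an unmodified 32-bit debugger issuing e.g.
 *
 *	ptrace(PTRACE_GET_THREAD_AREA, pid, 0, &tp);
 *
 * reaches this handler via compat_sys_ptrace() on an arm64 kernel.
 */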
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}
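
/*
 * Illustrative example (assumed tracer-side code): a 64-bit debugger
 * tracing a 32-bit child gets user_aarch32_ptrace_view above, so it can
 * read the child's TLS register through the native regset interface:
 *
 *	struct iovec iov = { &tls, sizeof(tls) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);
 */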

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (tracehook_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		tracehook_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		tracehook_report_syscall_exit(regs, 1);
	}
}
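
/*
 * Tracer-side view of the above (illustrative only, not kernel code):
 * while the tracee is stopped at a syscall trap, the clobbered register
 * reads back the direction, 0 for entry and 1 for exit:
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { &uregs, sizeof(uregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	int is_exit = uregs.regs[7];	(ip/r12 for an AArch32 tracee)
 */

/*
 * Returns the syscall number to invoke (which the tracer may have
 * rewritten), or NO_SYSCALL if the syscall should not run because the
 * tracer requested emulation or seccomp denied it.
 */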
int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}
/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 *
 * Returns non-zero when the registers are acceptable as-is (beyond the
 * unconditional clearing of RES0 bits); otherwise pstate is forced to a
 * safe EL0t value and zero is returned so that callers can reject the
 * update.
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}
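
/*
 * Illustrative caller pattern (assumed; mirrors the regset and signal
 * return paths elsewhere in this file):
 *
 *	if (!valid_user_regs(&newregs, target))
 *		return -EINVAL;
 */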