// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
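/*
 * E.g., MASK(3) == 0x7, so PFM_MASK has the low 38 bits set (38 bits
 * being the width assumed here for perfmon register values).
 */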

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call. */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
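
/*
 * Note: this test relies on cr.ifs bit 63 (the "valid" bit) being set
 * when the kernel is entered via an interruption, while syscall frames
 * leave it clear, so a non-negative signed value identifies a syscall
 * frame.  (A sketch of the convention; see entry.S/ivt.S for the
 * authoritative details.)
 */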

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
# define GET_BITS(first, last, unat)					\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

# undef GET_BITS
}
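
/*
 * Rough worked example of GET_BITS(): for GET_BITS(8, 11, unat),
 * suppose ia64_unat_pos(&pt->r8) is 16, i.e. the NaT bits of r8-r11
 * sit at bits 16-19 of the scratch unat.  Then dist = 16 - 8 = 8, and
 * rotating right by 8 moves them down to bits 8-11, where "mask"
 * (MASK(4) << 8) picks them out.  (The position 16 is made up; the
 * real one depends on where r8 lives in struct pt_regs.)
 */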

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
# define PUT_BITS(first, last, nat)					\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

# undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
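
/*
 * For reference: an IA-64 bundle is 16 bytes holding three instruction
 * slots, so the instruction pointer is really the pair (cr_iip, psr.ri)
 * with ri in {0, 1, 2}; stepping past slot 2 wraps ri to 0 and advances
 * cr_iip by 16.  MLX bundles are special-cased above because slots 1
 * and 2 together hold one long-format instruction (e.g. movl), so
 * slot 2 is not independently executable.
 */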

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignments of the user
 * and kernel backing stores are different, this is not completely
 * trivial.  In essence, we need to construct the user RNAT based on
 * up to two kernel RNAT values and/or the RNAT value saved in the
 * child's pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 | \
 *	+--------+ |
 *	| slot01 | > child_regs->ar_rnat
 *	+--------+ |
 *	| slot02 | /				kernel rbs
 *	+--------+				+--------+
 *		    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 *	+- - - - +				+--------+
 *						| slot62 |
 *	+- - - - +				+--------+
 *						|  rnat  |
 *	+- - - - +				+--------+
 *	  vrnat					| slot00 |
 *	+- - - - +				+--------+
 *						=	 =
 *						+--------+
 *						| slot00 | \
 *						+--------+ |
 *						| slot01 | > child_stack->ar_rnat
 *						+--------+ |
 *						| slot02 | /
 *						+--------+
 *						  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 get their values from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
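
/*
 * Boundary-case sketch for get_rnat(): with, say, ubspstore ==
 * 0x...1f0, ia64_rse_slot_num(ubspstore) is 62, so "umask" covers
 * bits 0-61 and nearly the whole user RNaT word is merged in from
 * pt->ar_rnat rather than read off the kernel RBS.  (Addresses made
 * up for illustration.)
 */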

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
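
/*
 * E.g., for urbs_end == 0x...1d0, ia64_rse_rnat_addr() yields the
 * enclosing RNaT collection slot at 0x...1f8, so addresses up to and
 * including that slot still count as being on the kernel RBS.
 * (Example address chosen for illustration.)
 */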

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (unsigned long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (unsigned long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
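
/*
 * A sketch of the loadrs encoding used above: pt->loadrs holds the
 * value in the ar.rsc.loadrs position, i.e. the number of dirty bytes
 * shifted left by 16, so "pt->loadrs >> 19" is dirty-bytes/8, the
 * number of dirty 8-byte slots on the kernel RBS.  (See the RSE
 * description in the architecture manual for the authoritative
 * definition.)
 */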

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger might change the
 * thread's user stack (by writing memory directly), and we must
 * prevent the RSE state stored in the kernel from overwriting the
 * user stack (user space's RSE state is newer than the kernel's in
 * that case).  To work around the issue, we copy the kernel RSE state
 * to the user RBS before the task stops, so the user RBS holds
 * up-to-date data.  We then copy the user RBS back to the kernel
 * after the task is resumed from the traced stop, and the kernel will
 * use the newer state to return to user mode.  TIF_RESTORE_RSE is the
 * flag indicating that we need to synchronize the user RSE state back
 * to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
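
/*
 * The two functions above pair up: ia64_ptrace_stop() pushes the
 * kernel RBS contents out to user memory before a traced stop, and
 * ia64_sync_krbs() pulls the (possibly debugger-modified) user RBS
 * contents back in before the task returns to user mode.
 */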

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
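
/*
 * Sketch of the effect of setting psr.dfh above: the task's next
 * access to f32-f127 raises a disabled-FP-register fault, whose
 * handler can then reload the (possibly ptrace-modified) contents of
 * thread.fph, so debugger writes are picked up lazily.
 */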

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
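^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * Read or write all NaT bits as one bitmask (bit N == 1 iff rN is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * NaT): the scratch registers' NaT bits are encoded in the scratch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * ar.unat saved with pt_regs, while the preserved r4-r7 must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * accessed one by one through the unwinder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) */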
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) access_nat_bits (struct task_struct *child, struct pt_regs *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct unw_frame_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) unsigned long *data, int write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) char nat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (write_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) nat_bits = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) dprintk("ptrace: failed to set ar.unat\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) for (regnum = 4; regnum <= 7; ++regnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) unw_get_gr(info, regnum, &dummy, &nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) unw_set_gr(info, regnum, dummy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) (nat_bits >> regnum) & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) dprintk("ptrace: failed to read ar.unat\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) for (regnum = 4; regnum <= 7; ++regnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) unw_get_gr(info, regnum, &dummy, &nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) nat_bits |= (nat != 0) << regnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) *data = nat_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) access_uarea (struct task_struct *child, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) unsigned long *data, int write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
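^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * Copy the complete user-visible register state of CHILD to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * pt_all_user_regs image at PPR.  Scratch state comes straight from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * pt_regs and switch_stack; preserved registers (gr4-gr7, b1-b5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * fr2-fr5, fr16-fr31) go through the unwinder; fph is flushed off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * the CPU first so thread.fph is current.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */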
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct unw_frame_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct ia64_fpreg fpval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct switch_stack *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct pt_regs *pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) long ret, retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) char nat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) pt = task_pt_regs(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) sw = (struct switch_stack *) (child->thread.ksp + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) unw_init_from_blocked_task(&info, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (unw_unwind_to_user(&info) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (((unsigned long) ppr & 0x7) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dprintk("ptrace: unaligned register address %p\n", ppr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) || access_uarea(child, PT_AR_EC, &ec, 0) < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) || access_uarea(child, PT_AR_LC, &lc, 0) < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) || access_uarea(child, PT_CFM, &cfm, 0) < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* control regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) retval |= __put_user(psr, &ppr->cr_ipsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* app regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) retval |= __put_user(cfm, &ppr->cfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* gr1-gr3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* gr4-gr7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) for (i = 4; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) retval |= __put_user(val, &ppr->gr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /* gr8-gr11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* gr12-gr15 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* gr16-gr31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* b0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) retval |= __put_user(pt->b0, &ppr->br[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /* b1-b5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) for (i = 1; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (unw_access_br(&info, i, &val, 0) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) retval |= __put_user(val, &ppr->br[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* b6-b7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) retval |= __put_user(pt->b6, &ppr->br[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) retval |= __put_user(pt->b7, &ppr->br[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* fr2-fr5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (i = 2; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (unw_get_fr(&info, i, &fpval) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* fr6-fr11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) sizeof(struct ia64_fpreg) * 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* fp scratch regs (12-15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) sizeof(struct ia64_fpreg) * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* fr16-fr31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) for (i = 16; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (unw_get_fr(&info, i, &fpval) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* fph */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) ia64_flush_fph(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) sizeof(ppr->fr[32]) * 96);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* preds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) retval |= __put_user(pt->pr, &ppr->pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* nat bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) retval |= __put_user(nat_bits, &ppr->nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ret = retval ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
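^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * Inverse of ptrace_getregs(): scatter a pt_all_user_regs image back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * into pt_regs, the unwind frame, the switch_stack scratch fpregs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * thread.fph.  PSR, RSC, EC, LC, RNAT, BSP, CFM and the NaT bits are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * written last through access_uarea() so its write-side sanitizing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * still applies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) */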
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct unw_frame_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct switch_stack *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct ia64_fpreg fpval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct pt_regs *pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) long ret, retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) memset(&fpval, 0, sizeof(fpval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) pt = task_pt_regs(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) sw = (struct switch_stack *) (child->thread.ksp + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unw_init_from_blocked_task(&info, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (unw_unwind_to_user(&info) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (((unsigned long) ppr & 0x7) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dprintk("ptrace: unaligned register address %p\n", ppr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* control regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) retval |= __get_user(psr, &ppr->cr_ipsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* app regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) retval |= __get_user(cfm, &ppr->cfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* gr1-gr3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* gr4-gr7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) for (i = 4; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) retval |= __get_user(val, &ppr->gr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* NaT bit will be set via PT_NAT_BITS: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (unw_set_gr(&info, i, val, 0) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* gr8-gr11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /* gr12-gr15 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* gr16-gr31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /* b0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) retval |= __get_user(pt->b0, &ppr->br[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* b1-b5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) for (i = 1; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) retval |= __get_user(val, &ppr->br[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) unw_set_br(&info, i, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* b6-b7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) retval |= __get_user(pt->b6, &ppr->br[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) retval |= __get_user(pt->b7, &ppr->br[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* fr2-fr5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) for (i = 2; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (unw_set_fr(&info, i, fpval) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* fr6-fr11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) sizeof(ppr->fr[6]) * 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* fp scratch regs (12-15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) sizeof(ppr->fr[12]) * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /* fr16-fr31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) for (i = 16; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) retval |= __copy_from_user(&fpval, &ppr->fr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) sizeof(fpval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (unw_set_fr(&info, i, fpval) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* fph */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ia64_sync_fph(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) sizeof(ppr->fr[32]) * 96);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* preds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) retval |= __get_user(pt->pr, &ppr->pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* nat bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) retval |= __get_user(nat_bits, &ppr->nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) retval |= access_uarea(child, PT_AR_EC, &ec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) retval |= access_uarea(child, PT_AR_LC, &lc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) retval |= access_uarea(child, PT_CFM, &cfm, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ret = retval ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
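^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * PSR.ss raises a Single Step trap after each instruction, PSR.tb a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * Taken Branch trap at each taken branch; the latter is what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * PTRACE_SINGLEBLOCK uses to stop only at control transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */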
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) user_enable_single_step (struct task_struct *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) set_tsk_thread_flag(child, TIF_SINGLESTEP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) child_psr->ss = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) user_enable_block_step (struct task_struct *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) set_tsk_thread_flag(child, TIF_SINGLESTEP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) child_psr->tb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) user_disable_single_step (struct task_struct *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* make sure the single step/taken-branch trap bits are not set: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) clear_tsk_thread_flag(child, TIF_SINGLESTEP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) child_psr->ss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) child_psr->tb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Called by kernel/ptrace.c when detaching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * Make sure the single step bit is not set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ptrace_disable (struct task_struct *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) user_disable_single_step(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
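^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * Userspace sketch (not part of this file): with the tracee stopped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * a debugger reads its instruction pointer via the PTRACE_PEEKUSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * case below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *	errno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *	long ip = ptrace(PTRACE_PEEKUSR, pid, PT_CR_IIP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * PEEK requests return the fetched word itself, which is why the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * cases below call force_successful_syscall_return(): a legitimate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * negative value must not be misread as -errno on syscall exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */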
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) arch_ptrace (struct task_struct *child, long request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) unsigned long addr, unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) switch (request) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) case PTRACE_PEEKTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) case PTRACE_PEEKDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /* read word at location addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (ptrace_access_vm(child, addr, &data, sizeof(data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) FOLL_FORCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) != sizeof(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* ensure return value is not mistaken for error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) force_successful_syscall_return();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * by the generic ptrace_request().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case PTRACE_PEEKUSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* read the word at addr in the USER area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (access_uarea(child, addr, &data, 0) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* ensure return value is not mistaken for error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) force_successful_syscall_return();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) case PTRACE_POKEUSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* write the word at addr in the USER area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (access_uarea(child, addr, &data, 1) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) case PTRACE_OLD_GETSIGINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* for backwards-compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) case PTRACE_OLD_SETSIGINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /* for backwards-compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) case PTRACE_GETREGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return ptrace_getregs(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) (struct pt_all_user_regs __user *) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) case PTRACE_SETREGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return ptrace_setregs(child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) (struct pt_all_user_regs __user *) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return ptrace_request(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* "asmlinkage" so the input arguments are preserved... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
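^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * Entry-side tracing hook.  Returning -ENOSYS when the tracer aborts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * the call lets the caller (see entry.S) skip the system call proper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * returning 0 lets it proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) */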
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) asmlinkage long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) long arg4, long arg5, long arg6, long arg7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct pt_regs regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (test_thread_flag(TIF_SYSCALL_TRACE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (tracehook_report_syscall_entry(&regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* copy user rbs to kernel rbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (test_thread_flag(TIF_RESTORE_RSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) ia64_sync_krbs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /* "asmlinkage" so the input arguments are preserved... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) asmlinkage void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) long arg4, long arg5, long arg6, long arg7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct pt_regs regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) audit_syscall_exit(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) step = test_thread_flag(TIF_SINGLESTEP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (step || test_thread_flag(TIF_SYSCALL_TRACE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) tracehook_report_syscall_exit(&regs, step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* copy user rbs to kernel rbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (test_thread_flag(TIF_RESTORE_RSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ia64_sync_krbs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* Utrace implementation starts here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct regset_get {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) void *kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) void __user *ubuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct regset_set {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) const void *kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) const void __user *ubuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct regset_getset {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct task_struct *target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) const struct user_regset *regset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct regset_get get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct regset_set set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) } u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) unsigned int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) };
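^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * Bookkeeping handed to the do_*_get/set callbacks below: the regset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * core's pos/count cursor plus the kernel or user buffer being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * copied, with ->ret carrying the callback's result back out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) */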
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
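^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * Offset of each scratch register within pt_regs, indexed by register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * number; -1 marks registers not present there (r0 is hardwired to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * zero, r4-r7 are preserved and must go through the unwinder).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) */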
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static const ptrdiff_t pt_offsets[32] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #define R(n) offsetof(struct pt_regs, r##n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) [0] = -1, R(1), R(2), R(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) [4] = -1, [5] = -1, [6] = -1, [7] = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) #undef R
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) unsigned long addr, unsigned long *data, int write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct pt_regs *pt = task_pt_regs(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) unsigned reg = addr / sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ptrdiff_t d = pt_offsets[reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (d >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) unsigned long *ptr = (void *)pt + d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) *ptr = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) *data = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) char nat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (write_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /* read NaT bit first: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) unsigned long dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) int ret = unw_get_gr(info, reg, &dummy, &nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return unw_access_gr(info, reg, data, &nat, write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) unsigned long addr, unsigned long *data, int write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct pt_regs *pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) unsigned long *ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) pt = task_pt_regs(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) case ELF_BR_OFFSET(0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) ptr = &pt->b0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) data, write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) case ELF_BR_OFFSET(6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ptr = &pt->b6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) case ELF_BR_OFFSET(7):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) ptr = &pt->b7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) *ptr = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) *data = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) unsigned long addr, unsigned long *data, int write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct pt_regs *pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) unsigned long cfm, urbs_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) unsigned long *ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) pt = task_pt_regs(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) case ELF_AR_RSC_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* force PL3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) pt->ar_rsc = *data | (3 << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) *data = pt->ar_rsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case ELF_AR_BSP_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * By convention, we use PT_AR_BSP to refer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * the end of the user-level backing store.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * to get the real value of ar.bsp at the time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * the kernel was entered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * Furthermore, when changing the contents of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * PT_AR_BSP (or PT_CFM) while the task is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * blocked in a system call, convert the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * so that the non-system-call exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * path is used. This ensures that the proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * state will be picked up when resuming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * execution. However, it *also* means that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * once we write PT_AR_BSP/PT_CFM, it won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * possible to modify the syscall arguments of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * the pending system call any longer. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * shouldn't be an issue because modifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * PT_AR_BSP/PT_CFM generally implies that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * we're either abandoning the pending system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * call or that we defer its re-execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * (e.g., due to GDB doing an inferior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * function call).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (write_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (*data != urbs_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (in_syscall(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) convert_to_non_syscall(target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) cfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * Simulate user-level write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * of ar.bsp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) pt->loadrs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) pt->ar_bspstore = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) *data = urbs_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) case ELF_AR_BSPSTORE_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ptr = &pt->ar_bspstore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) case ELF_AR_RNAT_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) ptr = &pt->ar_rnat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) case ELF_AR_CCV_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ptr = &pt->ar_ccv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) case ELF_AR_UNAT_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ptr = &pt->ar_unat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) case ELF_AR_FPSR_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ptr = &pt->ar_fpsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) case ELF_AR_PFS_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) ptr = &pt->ar_pfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) case ELF_AR_LC_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return unw_access_ar(info, UNW_AR_LC, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) case ELF_AR_EC_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return unw_access_ar(info, UNW_AR_EC, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) case ELF_AR_CSD_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ptr = &pt->ar_csd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) case ELF_AR_SSD_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) ptr = &pt->ar_ssd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) case ELF_CR_IIP_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ptr = &pt->cr_iip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) case ELF_CFM_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (write_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (((cfm ^ *data) & PFM_MASK) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (in_syscall(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) convert_to_non_syscall(target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) cfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) | (*data & PFM_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) *data = cfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) case ELF_CR_IPSR_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (write_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) unsigned long tmp = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /* psr.ri==3 is a reserved value: SDM 2:25 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) tmp &= ~IA64_PSR_RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) pt->cr_ipsr = ((tmp & IPSR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) | (pt->cr_ipsr & ~IPSR_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) *data = (pt->cr_ipsr & IPSR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) } else if (addr == ELF_NAT_OFFSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return access_nat_bits(target, pt, info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) data, write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) else if (addr == ELF_PR_OFFSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) ptr = &pt->pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) *ptr = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) *data = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
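^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * Dispatch one 8-byte slot of the ELF gregset layout to the matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * accessor: general registers, branch registers, or everything else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * (application registers, cr.iip/cr.ipsr, NaT bits, predicates).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) */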
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) unsigned long addr, unsigned long *data, int write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return access_elf_gpreg(target, info, addr, data, write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return access_elf_breg(target, info, addr, data, write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return access_elf_areg(target, info, addr, data, write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct regset_membuf {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct membuf to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
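^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * Regset "get" callback, run against the target's unwind state; bails
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * out if the target cannot be unwound to a user-level frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) */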
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) void do_gpregs_get(struct unw_frame_info *info, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct regset_membuf *dst = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct membuf to = dst->to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) elf_greg_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (unw_unwind_to_user(info) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * coredump format:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * r0-r31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * predicate registers (p0-p63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * b0-b7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * ip cfm user-mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * ar.rsc ar.bsp ar.bspstore ar.rnat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* Skip r0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) membuf_zero(&to, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) for (n = 8; to.left && n < ELF_AR_END_OFFSET; n += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (access_elf_reg(info->task, info, n, &reg, 0) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) dst->ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) membuf_store(&to, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
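^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * Regset "set" counterpart: copy the gregset image in chunks of up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * 16 slots, then poke each slot into the task via access_elf_reg().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) */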
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) void do_gpregs_set(struct unw_frame_info *info, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct regset_getset *dst = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (unw_unwind_to_user(info) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (!dst->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) /* Skip r0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (dst->pos < ELF_GR_OFFSET(1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) &dst->u.set.kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) &dst->u.set.ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 0, ELF_GR_OFFSET(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (dst->ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) while (dst->count && dst->pos < ELF_AR_END_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) unsigned int n, from, to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) elf_greg_t tmp[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) from = dst->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) to = from + sizeof(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (to > ELF_AR_END_OFFSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) to = ELF_AR_END_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /* get up to 16 values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) dst->ret = user_regset_copyin(&dst->pos, &dst->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) from, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (dst->ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* now copy them into registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (access_elf_reg(dst->target, info, from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) &tmp[n], 1) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) dst->ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) #define ELF_FP_OFFSET(i) ((i) * sizeof(elf_fpreg_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
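^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * Regset "get" for the FP state: f0/f1 are architecturally fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * (+0.0 and +1.0) and emitted as zero padding, fr2-fr31 come from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * unwinder, and the high partition fr32-fr127 from thread.fph once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * any live fph state has been flushed back to the thread structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */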
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) void do_fpregs_get(struct unw_frame_info *info, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) struct task_struct *task = info->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct regset_membuf *dst = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct membuf to = dst->to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) elf_fpreg_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (unw_unwind_to_user(info) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /* Skip pos 0 and 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) membuf_zero(&to, 2 * sizeof(elf_fpreg_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /* fr2-fr31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) for (n = 2; to.left && n < 32; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (unw_get_fr(info, n, &reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) dst->ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) membuf_write(&to, &reg, sizeof(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) /* fph */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (!to.left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) ia64_flush_fph(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (task->thread.flags & IA64_THREAD_FPH_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) membuf_write(&to, &task->thread.fph, 96 * sizeof(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) membuf_zero(&to, 96 * sizeof(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) void do_fpregs_set(struct unw_frame_info *info, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct regset_getset *dst = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) elf_fpreg_t fpreg, tmp[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int index, start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (unw_unwind_to_user(info) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
/* Ignore writes to fr0 and fr1: they are architecturally fixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) &dst->u.set.kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) &dst->u.set.ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 0, ELF_FP_OFFSET(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (dst->count == 0 || dst->ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* fr2-fr31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) start = dst->pos;
end = min((unsigned int)ELF_FP_OFFSET(32), dst->pos + dst->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) dst->ret = user_regset_copyin(&dst->pos, &dst->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (dst->ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
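/*
 * start and end are byte offsets; each register is 16 bytes, with
 * u.bits[0] (the low word) at +0 and u.bits[1] (the high word) at
 * +8.  Worked example: start == ELF_FP_OFFSET(4) + 8 means the user
 * buffer began at the high word of f4, so f4's low word is fetched
 * from the current frame before the loop below rewrites the whole
 * register from tmp[].
 */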
if (start & 0xF) { /* user data begins mid-register: preserve the low word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) &fpreg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) dst->ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) = fpreg.u.bits[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) start &= ~0xFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
if (end & 0xF) { /* user data ends mid-register: preserve the high word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) &fpreg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) dst->ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) = fpreg.u.bits[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) end = (end + 0xF) & ~0xFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) index = start / sizeof(elf_fpreg_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (unw_set_fr(info, index, tmp[index - 2])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) dst->ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (dst->ret || dst->count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) /* fph */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ia64_sync_fph(dst->target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) dst->ret = user_regset_copyin(&dst->pos, &dst->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) &dst->u.set.kbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) &dst->u.set.ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) &dst->target->thread.fph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ELF_FP_OFFSET(32), -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
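/*
 * Run @call on an unwind frame for @target.  The current task must
 * build a frame for its own live state, which is what
 * unw_init_running() does before invoking the callback; a stopped
 * tracee is instead unwound from its saved blocked-task state.
 */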
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) unwind_and_call(void (*call)(struct unw_frame_info *, void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) struct task_struct *target, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (target == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) unw_init_running(call, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct unw_frame_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) memset(&info, 0, sizeof(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) unw_init_from_blocked_task(&info, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) (*call)(&info, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) do_regset_call(void (*call)(struct unw_frame_info *, void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) struct regset_getset info = { .target = target, .regset = regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) .pos = pos, .count = count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) .u.set = { .kbuf = kbuf, .ubuf = ubuf },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) .ret = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) unwind_and_call(call, target, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return info.ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) gpregs_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct regset_membuf info = {.to = to};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) unwind_and_call(do_gpregs_get, target, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return info.ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) static int gpregs_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) return do_regset_call(do_gpregs_set, target, regset, pos, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) kbuf, ubuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) do_sync_rbs(info, ia64_sync_user_rbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * This is called to write back the register backing store.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * ptrace does this before it stops, so that a tracer reading the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * memory after the thread stops will get the current register data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) gpregs_writeback(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) int now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
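/*
 * test_and_set keeps this idempotent: the first request flags the
 * task with TIF_RESTORE_RSE, so it reconciles its register backing
 * store with user memory before returning to user mode, and flushes
 * the RBS now; further requests while the flag is set are no-ops.
 */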
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) set_notify_resume(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
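/*
 * ->active tells the regset core how many registers are worth
 * reporting: all 128 when the lazily-saved fph partition is valid,
 * otherwise only fr0-fr31.
 */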
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) fpregs_active(struct task_struct *target, const struct user_regset *regset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) static int fpregs_get(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) struct membuf to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) struct regset_membuf info = {.to = to};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) unwind_and_call(do_fpregs_get, target, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) return info.ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) static int fpregs_set(struct task_struct *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) const struct user_regset *regset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) unsigned int pos, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) const void *kbuf, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return do_regset_call(do_fpregs_set, target, regset, pos, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) kbuf, ubuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
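/*
 * access_uarea() backs PTRACE_PEEKUSER/PTRACE_POKEUSER: @addr is a
 * PT_* byte offset into the user area and is mapped either onto a
 * regset position (then serviced by access_elf_reg()) or onto a
 * debug register.  Illustrative tracer usage (not part of this
 * file):
 *
 *	errno = 0;
 *	val = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *
 * arrives here with addr == PT_CR_IIP and write_access == 0.
 */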
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) access_uarea(struct task_struct *child, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) unsigned long *data, int write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) unsigned int pos = -1; /* an invalid value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) unsigned long *ptr, regnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if ((addr & 0x7) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) dprintk("ptrace: unaligned register address 0x%lx\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) (addr >= PT_R7 + 8 && addr < PT_B1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) dprintk("ptrace: rejecting access to register "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) "address 0x%lx\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) case PT_F32 ... (PT_F127 + 15):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) pos = addr - PT_F32 + ELF_FP_OFFSET(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) case PT_F2 ... (PT_F5 + 15):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) pos = addr - PT_F2 + ELF_FP_OFFSET(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) case PT_F10 ... (PT_F31 + 15):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) pos = addr - PT_F10 + ELF_FP_OFFSET(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) case PT_F6 ... (PT_F9 + 15):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) pos = addr - PT_F6 + ELF_FP_OFFSET(6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
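/*
 * Each PT_Fn slot is 16 bytes wide, so for example
 * addr == PT_F32 + 8 yields pos == ELF_FP_OFFSET(32) + 8, i.e.
 * reg == 32 and which_half == 1 below: the fph branch then accesses
 * the high word of f32 in thread.fph.
 */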
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (pos != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) unsigned reg = pos / sizeof(elf_fpreg_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) int which_half = (pos / sizeof(unsigned long)) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (reg < 32) { /* fr2-fr31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct unw_frame_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) elf_fpreg_t fpreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) memset(&info, 0, sizeof(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) unw_init_from_blocked_task(&info, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (unw_unwind_to_user(&info) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (unw_get_fr(&info, reg, &fpreg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (write_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) fpreg.u.bits[which_half] = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (unw_set_fr(&info, reg, fpreg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) *data = fpreg.u.bits[which_half];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) } else { /* fph */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) elf_fpreg_t *p = &child->thread.fph[reg - 32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) unsigned long *bits = &p->u.bits[which_half];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) ia64_sync_fph(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) *bits = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) else if (child->thread.flags & IA64_THREAD_FPH_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) *data = *bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) *data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) case PT_NAT_BITS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) pos = ELF_NAT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) case PT_R4 ... PT_R7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) pos = addr - PT_R4 + ELF_GR_OFFSET(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) case PT_B1 ... PT_B5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) pos = addr - PT_B1 + ELF_BR_OFFSET(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) case PT_AR_EC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) pos = ELF_AR_EC_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) case PT_AR_LC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) pos = ELF_AR_LC_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) case PT_CR_IPSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) pos = ELF_CR_IPSR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) case PT_CR_IIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) pos = ELF_CR_IIP_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) case PT_CFM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) pos = ELF_CFM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) case PT_AR_UNAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) pos = ELF_AR_UNAT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) case PT_AR_PFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) pos = ELF_AR_PFS_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) case PT_AR_RSC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) pos = ELF_AR_RSC_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) case PT_AR_RNAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) pos = ELF_AR_RNAT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) case PT_AR_BSPSTORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) pos = ELF_AR_BSPSTORE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) case PT_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) pos = ELF_PR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) case PT_B6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) pos = ELF_BR_OFFSET(6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) case PT_AR_BSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) pos = ELF_AR_BSP_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) case PT_R1 ... PT_R3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) pos = addr - PT_R1 + ELF_GR_OFFSET(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) case PT_R12 ... PT_R15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) pos = addr - PT_R12 + ELF_GR_OFFSET(12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) case PT_R8 ... PT_R11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) pos = addr - PT_R8 + ELF_GR_OFFSET(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) case PT_R16 ... PT_R31:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) pos = addr - PT_R16 + ELF_GR_OFFSET(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) case PT_AR_CCV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) pos = ELF_AR_CCV_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) case PT_AR_FPSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) pos = ELF_AR_FPSR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) case PT_B0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) pos = ELF_BR_OFFSET(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) case PT_B7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) pos = ELF_BR_OFFSET(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) case PT_AR_CSD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) pos = ELF_AR_CSD_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) case PT_AR_SSD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) pos = ELF_AR_SSD_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (pos != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct unw_frame_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) memset(&info, 0, sizeof(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) unw_init_from_blocked_task(&info, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (unw_unwind_to_user(&info) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) return access_elf_reg(child, &info, pos, data, write_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /* access debug registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (addr >= PT_IBR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) regnum = (addr - PT_IBR) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) ptr = &child->thread.ibr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) regnum = (addr - PT_DBR) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) ptr = &child->thread.dbr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (regnum >= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) dprintk("ptrace: rejecting access to register "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) "address 0x%lx\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) child->thread.flags |= IA64_THREAD_DBG_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) memset(child->thread.dbr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) sizeof(child->thread.dbr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) memset(child->thread.ibr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) sizeof(child->thread.ibr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) ptr += regnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if ((regnum & 1) && write_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /* don't let the user set kernel-level breakpoints: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) *ptr = *data & ~(7UL << 56);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (write_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) *ptr = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) *data = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) static const struct user_regset native_regsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) .core_note_type = NT_PRSTATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) .n = ELF_NGREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) .regset_get = gpregs_get, .set = gpregs_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) .writeback = gpregs_writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) .core_note_type = NT_PRFPREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) .n = ELF_NFPREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) .regset_get = fpregs_get, .set = fpregs_set, .active = fpregs_active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) static const struct user_regset_view user_ia64_view = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) .name = "ia64",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) .e_machine = EM_IA_64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) };
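/*
 * Userspace reaches these regsets via PTRACE_GETREGSET /
 * PTRACE_SETREGSET with NT_PRSTATUS or NT_PRFPREG, and the same
 * tables describe the core-dump notes.  Illustrative tracer usage
 * (not part of this file):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */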
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return &user_ia64_view;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) struct syscall_get_set_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) unsigned long *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct pt_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) int rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct syscall_get_set_args *args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) struct pt_regs *pt = args->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) unsigned long *krbs, cfm, ndirty, nlocals, nouts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) int i, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (unw_unwind_to_user(info) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
/*
 * We get here via a few paths:
 * - break instruction: cfm is shared with the caller.
 *   Syscall args are in the "out" registers and locals are non-empty.
 * - epc instruction: cfm was set up by br.call,
 *   so there are no locals.
 *
 * In both cases the arguments are the last cfm.sof - cfm.sol stacked
 * registers, i.e. the outputs.
 * CFM: [ ... | sor: 17..14 | sol: 13..7 | sof: 6..0 ]
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) cfm = pt->cr_ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) nlocals = (cfm >> 7) & 0x7f; /* aka sol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
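/*
 * Worked example: cfm == 0x108 encodes sol == 2 and sof == 8, so
 * nlocals == 2 and nouts == 6 -- the six possible syscall args at
 * the end of the frame.  ndirty counts the stacked registers
 * already flushed to the kernel RBS below this frame, so out
 * register i sits at RBS slot ndirty + nlocals + args->i + i, with
 * ia64_rse_skip_regs() stepping over NaT-collection slots.
 */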
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (in_syscall(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) count = min_t(int, args->n, nouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) /* Iterate over outs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) int j = ndirty + nlocals + i + args->i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (args->rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) *ia64_rse_skip_regs(krbs, j) = args->args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) args->args[i] = *ia64_rse_skip_regs(krbs, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (!args->rw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) while (i < args->n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) args->args[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
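/*
 * Entry point for the generic syscall_get_arguments() /
 * syscall_set_arguments() helpers: read (rw == 0) or write (rw == 1)
 * all six possible syscall arguments of @task, which live in the RSE
 * backing store rather than in pt_regs.
 */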
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) void ia64_syscall_get_set_arguments(struct task_struct *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) struct pt_regs *regs, unsigned long *args, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) struct syscall_get_set_args data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) .i = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) .n = 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) .args = args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) .regs = regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) .rw = rw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (task == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) unw_init_running(syscall_get_set_args_cb, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct unw_frame_info ufi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) memset(&ufi, 0, sizeof(ufi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) unw_init_from_blocked_task(&ufi, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) syscall_get_set_args_cb(&ufi, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }