/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.
 *
 * This should only contain volatile regs, since the non-volatile
 * registers can be kept in the thread_struct; it should be set up
 * so that only the volatiles are saved by the interrupt entry code.
 *
 * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
 * that the overall structure is a multiple of 16 bytes in length.
 *
 * Note that the offsets of the fields in this struct correspond with
 * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
 */
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H

#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
struct pt_regs
{
	union {
		struct user_pt_regs user_regs;
		struct {
			unsigned long gpr[32];
			unsigned long nip;
			unsigned long msr;
			unsigned long orig_gpr3;
			unsigned long ctr;
			unsigned long link;
			unsigned long xer;
			unsigned long ccr;
#ifdef CONFIG_PPC64
			unsigned long softe;
#else
			unsigned long mq;
#endif
			unsigned long trap;
			unsigned long dar;
			unsigned long dsisr;
			unsigned long result;
		};
	};

	union {
		struct {
#ifdef CONFIG_PPC64
			unsigned long ppr;
#endif
#ifdef CONFIG_PPC_KUAP
			unsigned long kuap;
#endif
		};
		unsigned long __pad[2];	/* Maintain 16 byte interrupt stack alignment */
	};
};
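
/*
 * Illustrative sketch only (not part of this header): the 16-byte size
 * requirement noted at the top of this file could be checked at build
 * time, assuming <linux/build_bug.h> is included, roughly like this:
 *
 *	static_assert(sizeof(struct pt_regs) % 16 == 0,
 *		      "struct pt_regs must be a multiple of 16 bytes");
 */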
#endif


#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))

#ifdef __powerpc64__

/*
 * Size of redzone that userspace is allowed to use below the stack
 * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
 * the new ELFv2 little-endian ABI, so we allow the larger amount.
 *
 * For kernel code we allow a 288-byte redzone, in order to conserve
 * kernel stack space; gcc currently only uses 288 bytes, and will
 * hopefully allow explicit control of the redzone size in future.
 */
#define USER_REDZONE_SIZE	512
#define KERNEL_REDZONE_SIZE	288
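
/*
 * Illustrative sketch only: when the kernel builds a signal frame on the
 * user stack it must first step over the redzone below the user stack
 * pointer, conceptually along these lines (simplified; the frame type is
 * hypothetical):
 *
 *	unsigned long newsp;
 *
 *	newsp = user_stack_pointer(regs) - USER_REDZONE_SIZE;
 *	newsp -= sizeof(struct hypothetical_sigframe);
 *	newsp &= ~0xfUL;
 *
 * The final mask keeps the 16-byte stack alignment required by the ABI.
 */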

#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE	2	/* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + \
				 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER	12

#ifdef PPC64_ELF_ABI_v2
#define STACK_FRAME_MIN_SIZE	32
#else
#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
#endif

/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	128
#define __SIGNAL_FRAMESIZE32	64

#else /* __powerpc64__ */

#define USER_REDZONE_SIZE	0
#define KERNEL_REDZONE_SIZE	0
#define STACK_FRAME_OVERHEAD	16	/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE	1	/* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x72656773)
#define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER	2
#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD

/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	64

#endif /* __powerpc64__ */

#ifndef __ASSEMBLY__

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->nip;
}

static inline void instruction_pointer_set(struct pt_regs *regs,
		unsigned long val)
{
	regs->nip = val;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->gpr[1];
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return 0;
}

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
#endif
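
/*
 * Illustrative sketch only: an exception handler typically uses user_mode()
 * to decide whether the interrupted context was userspace (hypothetical
 * handler, not from this file):
 *
 *	void hypothetical_fault_handler(struct pt_regs *regs)
 *	{
 *		if (user_mode(regs))
 *			force_sig(SIGSEGV);
 *		else
 *			die("hypothetical kernel fault", regs, SIGSEGV);
 *	}
 */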

#define force_successful_syscall_return()	\
	do { \
		set_thread_flag(TIF_NOERROR); \
	} while(0)
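
/*
 * Illustrative sketch only: a syscall whose legitimate return value could
 * be mistaken for -errno can mark itself unconditionally successful
 * (hypothetical syscall body):
 *
 *	long hypothetical_syscall(unsigned long cookie)
 *	{
 *		force_successful_syscall_return();
 *		return cookie;
 *	}
 *
 * With TIF_NOERROR set, the exit path does not translate the value in r3
 * into an error indication.
 */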

struct task_struct;
extern int ptrace_get_reg(struct task_struct *task, int regno,
			  unsigned long *data);
extern int ptrace_put_reg(struct task_struct *task, int regno,
			  unsigned long data);

#define current_pt_regs() \
	((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
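
/*
 * Illustrative sketch only: current_pt_regs() relies on the entry code
 * placing the pt_regs for the current exception at the top of the task's
 * stack, so it is only meaningful in process context (hypothetical use):
 *
 *	unsigned long hypothetical_current_nip(void)
 *	{
 *		return current_pt_regs()->nip;
 *	}
 */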

#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3S
#define TRAP_FLAGS_MASK		0x10
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		true
#define SET_FULL_REGS(regs)	do { } while (0)
#else
#define TRAP_FLAGS_MASK		0x11
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
#endif
#define CHECK_FULL_REGS(regs)	BUG_ON(!FULL_REGS(regs))
#define NV_REG_POISON		0xdeadbeefdeadbeefUL
#else
/*
 * We use the least-significant bit of the trap field to indicate
 * whether we have saved the full set of registers, or only a
 * partial set. A 1 there means the partial set.
 * On 4xx we use the next bit to indicate whether the exception
 * is a critical exception (1 means it is).
 */
#define TRAP_FLAGS_MASK		0x1F
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
#define NV_REG_POISON		0xdeadbeef
#define CHECK_FULL_REGS(regs)						\
do {									\
	if ((regs)->trap & 1)						\
		printk(KERN_CRIT "%s: partial register set\n", __func__); \
} while (0)
#endif /* __powerpc64__ */
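
/*
 * Illustrative sketch only: with the 32-bit flag bits above, a made-up trap
 * value of 0x701 decodes as:
 *
 *	TRAP(regs)       == 0x700	(0x701 & ~0x1F, i.e. program check)
 *	FULL_REGS(regs)  == false	(bit 0 set: only a partial set saved)
 */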

static inline void set_trap(struct pt_regs *regs, unsigned long val)
{
	regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
}

static inline bool trap_is_scv(struct pt_regs *regs)
{
	return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
}

static inline bool trap_is_syscall(struct pt_regs *regs)
{
	return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
}

static inline bool trap_norestart(struct pt_regs *regs)
{
	return regs->trap & 0x10;
}

static inline void set_trap_norestart(struct pt_regs *regs)
{
	regs->trap |= 0x10;
}
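
/*
 * Illustrative sketch only: signal delivery uses the 0x10 flag so that an
 * interrupted syscall is not restarted again once a signal frame has been
 * set up, conceptually (the restart helper is hypothetical):
 *
 *	if (trap_is_syscall(regs) && !trap_norestart(regs))
 *		hypothetical_handle_restart(regs);
 *	set_trap_norestart(regs);
 */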

#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
	if (trap_is_scv(regs))
		return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
	else
		return !(regs->ccr & 0x10000000);
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (trap_is_scv(regs))
		return regs->gpr[3];

	if (is_syscall_success(regs))
		return regs->gpr[3];
	else
		return -regs->gpr[3];
}
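
/*
 * Illustrative sketch only: the two syscall entry points report errors
 * differently. With the older "sc" ABI, failure is signalled by CR0.SO
 * (the 0x10000000 bit of CCR) and r3 holds a positive errno, so for
 * example:
 *
 *	r3 == 2, CR0.SO clear  ->  regs_return_value() == 2
 *	r3 == 9, CR0.SO set    ->  regs_return_value() == -9 (-EBADF)
 *
 * With the "scv" ABI, r3 already holds a Linux-style return value
 * (negative errno on failure), so it is passed through unchanged.
 */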

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->gpr[3] = rc;
}

#define arch_has_single_step()	(1)
#define arch_has_block_step()	(true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT

/*
 * kprobe-based event tracer support
 */

#include <linux/stddef.h>
#include <linux/thread_info.h>
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read
 * @offset:	offset of the register within struct pt_regs
 *
 * regs_get_register() returns the value of the register stored at @offset
 * from the start of @regs. If @offset is bigger than MAX_REG_OFFSET,
 * this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
						unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
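
/*
 * Illustrative sketch only: the offset is normally obtained by name via
 * regs_query_register_offset(); the register name below is assumed from
 * the powerpc register table (hypothetical kprobe handler snippet):
 *
 *	int off = regs_query_register_offset("gpr3");
 *
 *	if (off >= 0)
 *		pr_info("r3 = %lx\n", regs_get_register(regs, off));
 */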

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s) indicated by @regs. It returns true if so, false otherwise.
 */
static inline bool regs_within_kernel_stack(struct pt_regs *regs,
						unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * which is specified by @regs. If the @n-th entry is NOT in the kernel
 * stack, this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
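
/*
 * Illustrative sketch only: a tracing helper might read a value spilled to
 * the kernel stack like this (the slot index is purely hypothetical):
 *
 *	unsigned long val = regs_get_kernel_stack_nth(regs, 4);
 *
 * Because of the bounds check against the THREAD_SIZE-aligned stack region,
 * an out-of-range @n simply yields 0 instead of a stray dereference.
 */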

#endif /* __ASSEMBLY__ */

#ifndef __powerpc64__
/* We need PT_SOFTE defined at all times to avoid #ifdefs */
#define PT_SOFTE PT_MQ
#else /* __powerpc64__ */
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1)	/* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164		/* each Vector reg occupies 4 slots in 32-bit */
#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
#define PT_VSR0_32 300		/* each VSR reg occupies 4 slots in 32-bit */
#endif /* __powerpc64__ */
#endif /* _ASM_POWERPC_PTRACE_H */