/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2003 Intel Co
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Arun Sharma <arun.sharma@intel.com>
 *
 * 12/07/98	S. Eranian	added pt_regs & switch_stack
 * 12/21/98	D. Mosberger	updated to match latest code
 *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
 *
 */
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <uapi/asm/ptrace.h>

/*
 * Base-2 logarithm of number of pages to allocate per task structure
 * (including register backing store and memory stack):
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER	3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER	2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER	1
#else
# define KERNEL_STACK_SIZE_ORDER	0
#endif
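
/*
 * With the orders above, the kernel stack works out to 32KB for the 4KB,
 * 8KB and 16KB page sizes, and to a single page (64KB) for the remaining
 * 64KB page-size configuration.
 */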

#define IA64_RBS_OFFSET			((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
#define IA64_STK_OFFSET			((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)

#define KERNEL_STACK_SIZE		IA64_STK_OFFSET

#ifndef __ASSEMBLY__

#include <asm/current.h>
#include <asm/page.h>

/*
 * We use ia64_psr(regs)->ri to determine which of the three
 * instructions in the bundle (16 bytes) took the sample.  Generate
 * the canonical representation by adding the slot number to the
 * instruction pointer.
 */
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
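
/*
 * For example, a sample taken in slot 2 of the bundle at 0xa000000000001230
 * has the canonical instruction pointer 0xa000000000001232.
 */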

/* the memory stack pointer is kept in r12 by software convention */
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->r12;
}

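/*
 * ia64 syscall convention: the return value is delivered in r8, while r10
 * serves as a separate error flag -- it is set to -1 on failure (with r8
 * then holding the positive errno value) and to 0 on success.
 */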
static inline int is_syscall_success(struct pt_regs *regs)
{
	return regs->r10 != -1;
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (is_syscall_success(regs))
		return regs->r8;
	else
		return -regs->r8;
}

/* Conserve space in histogram by encoding slot bits in address
 * bits 2 and 3 rather than bits 0 and 1.
 */
#define profile_pc(regs)						\
({									\
	unsigned long __ip = instruction_pointer(regs);			\
	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
})
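
/*
 * For example, with the bundle at 0xa000000000001230 and slot 2, __ip is
 * 0xa000000000001232 and profile_pc() yields 0xa000000000001238: the slot
 * number moves from bits 0-1 up to bits 2-3, leaving bits 0-1 clear.
 */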

/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t)	(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs)		((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs)	(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
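
/*
 * A task is in "fsys mode" while executing a light-weight (fsyscall)
 * system call: it runs at the most privileged level, so user_mode() is
 * false, but its pt_regs still sit at the user-entry position at the top
 * of the kernel stack.
 */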
# define fsys_mode(task,regs)					\
  ({								\
	  struct task_struct *_task = (task);			\
	  struct pt_regs *_regs = (regs);			\
	  !user_mode(_regs) && user_stack(_task, _regs);	\
  })

/*
 * System call handlers that, upon successful completion, need to return a negative value
 * should call force_successful_syscall_return() right before returning.  On architectures
 * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
 * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
 * flag will not get set.  On architectures which do not support a separate error flag,
 * the macro is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user space, by passing an extra argument to the syscall handler,
 * or something along those lines).
 *
 * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
 */
# define force_successful_syscall_return()	(task_pt_regs(current)->r8 = 0)
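
/*
 * A minimal sketch of the intended use (sys_foo() and get_cookie() are
 * hypothetical): the handler's result may be negative on success, so it
 * clears the saved r8 right before returning, and the syscall exit path
 * then leaves the error flag (r10) alone.
 *
 *	asmlinkage long sys_foo(void)
 *	{
 *		long cookie = get_cookie();
 *
 *		force_successful_syscall_return();
 *		return cookie;
 *	}
 */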

struct task_struct;			/* forward decl */
struct unw_frame_info;			/* forward decl */

extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
					    unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern void ia64_sync_krbs(void);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
				unsigned long, unsigned long);

/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);

extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);

extern void ia64_ptrace_stop(void);
#define arch_ptrace_stop(code, info)	\
	ia64_ptrace_stop()
#define arch_ptrace_stop_needed(code, info)	\
	(!test_thread_flag(TIF_RESTORE_RSE))

extern void ptrace_attach_sync_user_rbs (struct task_struct *);
#define arch_ptrace_attach(child)	\
	ptrace_attach_sync_user_rbs(child)

#define arch_has_single_step()  (1)
#define arch_has_block_step()   (1)
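
/*
 * Both flavors of stepping are done in hardware: single-stepping via the
 * psr.ss bit and block stepping via the taken-branch trap (psr.tb).
 */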

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PTRACE_H */