/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/ptrace.h
 *
 * Copyright (C) 1996-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PTRACE_H
#define __ASM_PTRACE_H

#include <asm/cpufeature.h>

#include <uapi/asm/ptrace.h>

/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)

#define INIT_PSTATE_EL1 \
        (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h)
#define INIT_PSTATE_EL2 \
        (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)

/*
 * PMR values used to mask/unmask interrupts.
 *
 * GIC priority masking works as follows: if an IRQ's priority is a higher
 * value than the value held in PMR, that IRQ is masked. Lowering the value
 * of PMR means masking more IRQs (or at least that the same IRQs remain
 * masked).
 *
 * To mask interrupts, we clear the most significant bit of PMR.
 *
 * Some code sections either automatically switch back to PSR.I or explicitly
 * require that priority masking is not used. If the GIC_PRIO_PSR_I_SET bit
 * is included in the priority mask, it indicates that PSR.I should be set
 * and that interrupt disabling temporarily does not rely on IRQ priorities.
 */
#define GIC_PRIO_IRQON 0xe0
#define __GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
#define __GIC_PRIO_IRQOFF_NS 0xa0
#define GIC_PRIO_PSR_I_SET (1 << 4)
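
/*
 * Worked example (illustrative only, restating the comment above):
 * GIC_PRIO_IRQON is 0xe0, so clearing its most significant bit yields
 * __GIC_PRIO_IRQOFF == 0x60; writing that to PMR masks every IRQ whose
 * priority value is above 0x60. When the non-secure priority view is in
 * use, __GIC_PRIO_IRQOFF_NS (0xa0) plays the same role. GIC_PRIO_PSR_I_SET
 * (0x10) can be ORed into a saved priority value to record that PSR.I,
 * rather than the PMR, is currently masking interrupts.
 */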

#define GIC_PRIO_IRQOFF						\
({								\
        extern struct static_key_false gic_nonsecure_priorities; \
        u8 __prio = __GIC_PRIO_IRQOFF;				\
								\
        if (static_branch_unlikely(&gic_nonsecure_priorities))	\
                __prio = __GIC_PRIO_IRQOFF_NS;			\
								\
        __prio;							\
})

/* Additional SPSR bits not exposed in the UABI */
#define PSR_MODE_THREAD_BIT (1 << 0)
#define PSR_IL_BIT (1 << 20)

/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS 12
#define COMPAT_PTRACE_SETREGS 13
#define COMPAT_PTRACE_GET_THREAD_AREA 22
#define COMPAT_PTRACE_SET_SYSCALL 23
#define COMPAT_PTRACE_GETVFPREGS 27
#define COMPAT_PTRACE_SETVFPREGS 28
#define COMPAT_PTRACE_GETHBPREGS 29
#define COMPAT_PTRACE_SETHBPREGS 30

/* SPSR_ELx bits for exceptions taken from AArch32 */
#define PSR_AA32_MODE_MASK 0x0000001f
#define PSR_AA32_MODE_USR 0x00000010
#define PSR_AA32_MODE_FIQ 0x00000011
#define PSR_AA32_MODE_IRQ 0x00000012
#define PSR_AA32_MODE_SVC 0x00000013
#define PSR_AA32_MODE_ABT 0x00000017
#define PSR_AA32_MODE_HYP 0x0000001a
#define PSR_AA32_MODE_UND 0x0000001b
#define PSR_AA32_MODE_SYS 0x0000001f
#define PSR_AA32_T_BIT 0x00000020
#define PSR_AA32_F_BIT 0x00000040
#define PSR_AA32_I_BIT 0x00000080
#define PSR_AA32_A_BIT 0x00000100
#define PSR_AA32_E_BIT 0x00000200
#define PSR_AA32_PAN_BIT 0x00400000
#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_DIT_BIT 0x01000000
#define PSR_AA32_Q_BIT 0x08000000
#define PSR_AA32_V_BIT 0x10000000
#define PSR_AA32_C_BIT 0x20000000
#define PSR_AA32_Z_BIT 0x40000000
#define PSR_AA32_N_BIT 0x80000000
#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */
#define PSR_AA32_GE_MASK 0x000f0000

#ifdef CONFIG_CPU_BIG_ENDIAN
#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT
#else
#define PSR_AA32_ENDSTATE 0
#endif

/* AArch32 CPSR bits, as seen in AArch32 */
#define COMPAT_PSR_DIT_BIT 0x00200000

/*
 * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
 * process is located in memory.
 */
#define COMPAT_PT_TEXT_ADDR 0x10000
#define COMPAT_PT_DATA_ADDR 0x10004
#define COMPAT_PT_TEXT_END_ADDR 0x10008
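
/*
 * Illustrative sketch (hypothetical userspace tracer, 'child' is a traced
 * compat task):
 *
 *        errno = 0;
 *        long text_start = ptrace(PTRACE_PEEKUSER, child,
 *                                 (void *)COMPAT_PT_TEXT_ADDR, NULL);
 *
 * Instead of a register value, this returns where the task's text is mapped.
 */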

/*
 * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing
 * a syscall -- i.e., its most recent entry into the kernel from
 * userspace was not via SVC, or otherwise a tracer cancelled the syscall.
 *
 * This must have the value -1, for ABI compatibility with ptrace etc.
 */
#define NO_SYSCALL (-1)

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>

/* sizeof(struct user) for AArch32 */
#define COMPAT_USER_SZ 296

/* Architecturally defined mapping between AArch32 and AArch64 registers */
#define compat_usr(x) regs[(x)]
#define compat_fp regs[11]
#define compat_sp regs[13]
#define compat_lr regs[14]
#define compat_sp_hyp regs[15]
#define compat_lr_irq regs[16]
#define compat_sp_irq regs[17]
#define compat_lr_svc regs[18]
#define compat_sp_svc regs[19]
#define compat_lr_abt regs[20]
#define compat_sp_abt regs[21]
#define compat_lr_und regs[22]
#define compat_sp_und regs[23]
#define compat_r8_fiq regs[24]
#define compat_r9_fiq regs[25]
#define compat_r10_fiq regs[26]
#define compat_r11_fiq regs[27]
#define compat_r12_fiq regs[28]
#define compat_sp_fiq regs[29]
#define compat_lr_fiq regs[30]
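
/*
 * Illustrative note: the macros above simply alias slots of pt_regs.regs[],
 * so AArch32 banked state can be read through the AArch64 frame, e.g.:
 *
 *        unsigned long sp = regs->compat_sp;      (expands to regs->regs[13])
 *        unsigned long r0 = regs->compat_usr(0);  (expands to regs->regs[0])
 */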

static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
{
        unsigned long pstate;

        pstate = psr & ~COMPAT_PSR_DIT_BIT;

        if (psr & COMPAT_PSR_DIT_BIT)
                pstate |= PSR_AA32_DIT_BIT;

        return pstate;
}

static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
{
        unsigned long psr;

        psr = pstate & ~PSR_AA32_DIT_BIT;

        if (pstate & PSR_AA32_DIT_BIT)
                psr |= COMPAT_PSR_DIT_BIT;

        return psr;
}
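
/*
 * Worked example (illustrative): the helpers above only relocate the DIT
 * flag, which is bit 21 in the AArch32 CPSR (COMPAT_PSR_DIT_BIT) but bit 24
 * in SPSR_ELx (PSR_AA32_DIT_BIT); all other bits pass through unchanged:
 *
 *        compat_psr_to_pstate(PSR_AA32_MODE_USR | COMPAT_PSR_DIT_BIT)
 *                == (PSR_AA32_MODE_USR | PSR_AA32_DIT_BIT)     (0x01000010)
 */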

/*
 * This struct defines the way the registers are stored on the stack during an
 * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
 * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs.
 */
struct pt_regs {
        union {
                struct user_pt_regs user_regs;
                struct {
                        u64 regs[31];
                        u64 sp;
                        u64 pc;
                        u64 pstate;
                };
        };
        u64 orig_x0;
#ifdef __AARCH64EB__
        u32 unused2;
        s32 syscallno;
#else
        s32 syscallno;
        u32 unused2;
#endif

        u64 orig_addr_limit;
        /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
        u64 pmr_save;
        u64 stackframe[2];

        /* Only valid for some EL1 exceptions. */
        u64 lockdep_hardirqs;
        u64 exit_rcu;
};
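
/*
 * Illustrative restatement of the invariants in the comment above (the
 * kernel relies on them elsewhere, e.g. in the entry code; nothing like
 * this is actually declared by this header):
 *
 *        static_assert(sizeof(struct pt_regs) % 16 == 0);
 *        static_assert(offsetof(struct pt_regs, user_regs) == 0);
 */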

static inline bool in_syscall(struct pt_regs const *regs)
{
        return regs->syscallno != NO_SYSCALL;
}

static inline void forget_syscall(struct pt_regs *regs)
{
        regs->syscallno = NO_SYSCALL;
}
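
/*
 * Illustrative sketch (hypothetical caller): entry code for a trap that is
 * not a syscall can clear stale syscall state so later checks on the same
 * pt_regs see the truth:
 *
 *        forget_syscall(regs);
 *        WARN_ON(in_syscall(regs));
 */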

#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)

#define arch_has_single_step() (1)

#ifdef CONFIG_COMPAT
#define compat_thumb_mode(regs) \
        (((regs)->pstate & PSR_AA32_T_BIT))
#else
#define compat_thumb_mode(regs) (0)
#endif

#define user_mode(regs) \
        (((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)

#define compat_user_mode(regs) \
        (((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \
         (PSR_MODE32_BIT | PSR_MODE_EL0t))

#define processor_mode(regs) \
        ((regs)->pstate & PSR_MODE_MASK)

#define irqs_priority_unmasked(regs) \
        (system_uses_irq_prio_masking() ? \
                (regs)->pmr_save == GIC_PRIO_IRQON : \
                true)

#define interrupts_enabled(regs) \
        (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))

#define fast_interrupts_enabled(regs) \
        (!((regs)->pstate & PSR_F_BIT))

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
        if (compat_user_mode(regs))
                return regs->compat_sp;
        return regs->sp;
}

extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
                                               unsigned int n);

/**
 * regs_get_register() - get register value from its offset
 * @regs: pt_regs from which the register value is read
 * @offset: byte offset of the register within struct pt_regs
 *
 * regs_get_register() returns the value of the register stored at @offset
 * within @regs. If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
{
        u64 val = 0;

        WARN_ON(offset & 7);

        offset >>= 3;
        switch (offset) {
        case 0 ... 30:
                val = regs->regs[offset];
                break;
        case offsetof(struct pt_regs, sp) >> 3:
                val = regs->sp;
                break;
        case offsetof(struct pt_regs, pc) >> 3:
                val = regs->pc;
                break;
        case offsetof(struct pt_regs, pstate) >> 3:
                val = regs->pstate;
                break;
        default:
                val = 0;
        }

        return val;
}
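
/*
 * Illustrative usage sketch (a hypothetical kprobes-style fetcher),
 * combining regs_query_register_offset(), declared above, with
 * regs_get_register():
 *
 *        int off = regs_query_register_offset("x2");
 *        u64 x2 = (off >= 0) ? regs_get_register(regs, off) : 0;
 */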

/*
 * Read a register given an architectural register index r.
 * This handles the common case where 31 means XZR, not SP.
 */
static inline unsigned long pt_regs_read_reg(const struct pt_regs *regs, int r)
{
        return (r == 31) ? 0 : regs->regs[r];
}

/*
 * Write a register given an architectural register index r.
 * This handles the common case where 31 means XZR, not SP.
 */
static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
                                     unsigned long val)
{
        if (r != 31)
                regs->regs[r] = val;
}
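
/*
 * Illustrative note: both helpers above treat register index 31 as XZR, so
 * in instruction emulation writes to it are discarded and reads return 0:
 *
 *        pt_regs_write_reg(regs, 31, 0xdead);    (no effect)
 *        val = pt_regs_read_reg(regs, 31);       (always 0)
 */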

/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
        return regs->sp;
}

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
        unsigned long val = regs->regs[0];

        /*
         * Audit currently uses regs_return_value() instead of
         * syscall_get_return_value(). Apply the same sign-extension here until
         * audit is updated to use syscall_get_return_value().
         */
        if (compat_user_mode(regs))
                val = sign_extend64(val, 31);

        return val;
}
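
/*
 * Worked example for the sign-extension above (illustrative values): a
 * compat task returning -EINVAL has 0xffffffea in the low 32 bits of
 * regs->regs[0]; sign_extend64(val, 31) turns that into
 * 0xffffffffffffffea, i.e. -22, matching what a native 64-bit task would
 * report to audit.
 */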

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
        regs->regs[0] = rc;
}

/**
 * regs_get_kernel_argument() - get Nth function argument in kernel
 * @regs: pt_regs of that context
 * @n: function argument number (starting from 0)
 *
 * regs_get_kernel_argument() returns the @n th argument of the function call.
 *
 * Note that this chooses the most likely register mapping. In very rare
 * cases this may not return correct data, for example, if one of the
 * function parameters is 16 bytes or bigger. In such cases, we cannot
 * access the parameter correctly and the register assignment of
 * subsequent parameters will be shifted.
 */
static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
                                                     unsigned int n)
{
#define NR_REG_ARGUMENTS 8
        if (n < NR_REG_ARGUMENTS)
                return pt_regs_read_reg(regs, n);
        return 0;
}
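
/*
 * Illustrative usage sketch (hypothetical kprobe pre-handler): under the
 * AAPCS64 mapping assumed here, the first eight integer arguments are in
 * x0-x7; anything beyond that is on the stack and not visible to this
 * helper:
 *
 *        unsigned long arg0 = regs_get_kernel_argument(regs, 0);   (x0)
 *        unsigned long arg8 = regs_get_kernel_argument(regs, 8);   (0)
 */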

/* We must avoid circular header include via sched.h */
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
        return regs->pc;
}

static inline void instruction_pointer_set(struct pt_regs *regs,
                                           unsigned long val)
{
        regs->pc = val;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
        return regs->regs[29];
}

#define procedure_link_pointer(regs) ((regs)->regs[30])

static inline void procedure_link_pointer_set(struct pt_regs *regs,
                                              unsigned long val)
{
        procedure_link_pointer(regs) = val;
}

extern unsigned long profile_pc(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif