^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * arch/arm64/include/asm/ftrace.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2013 Linaro Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #ifndef __ASM_FTRACE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #define __ASM_FTRACE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/insn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #define HAVE_FUNCTION_GRAPH_FP_TEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #define ARCH_SUPPORTS_FTRACE_OPS 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #define MCOUNT_ADDR ((unsigned long)_mcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) /* The BL at the callsite's adjusted rec->ip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define FTRACE_PLT_IDX 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define FTRACE_REGS_PLT_IDX 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define NR_FTRACE_PLTS 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Currently, gcc tends to save the link register after the local variables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * on the stack. This causes the max stack tracer to report the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * frame sizes for the wrong functions. By defining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * to find the return address on the stack after the local variables have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * been set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * Note, this may change in the future, and we will need to deal with that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * if it were to happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define ARCH_FTRACE_SHIFT_STACK_TRACER 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) extern void _mcount(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) extern void *return_address(unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/*
 * Per-record architecture-private ftrace state (embedded in struct
 * dyn_ftrace). arm64 needs no extra per-callsite data, so the struct
 * is intentionally empty.
 */
struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) extern unsigned long ftrace_graph_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) extern void return_to_handler(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static inline unsigned long ftrace_call_adjust(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Adjust addr to point at the BL in the callsite.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * See ftrace_init_nop() for the callsite sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) return addr + AARCH64_INSN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * addr is the address of the mcount call instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * recordmcount does the necessary offset calculation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct dyn_ftrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define ftrace_init_nop ftrace_init_nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define ftrace_return_address(n) return_address(n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * Because AArch32 mode does not share the same syscall table with AArch64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * tracing compat syscalls may result in reporting bogus syscalls or even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * hang-up, so just do not trace them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * See kernel/trace/trace_syscalls.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * x86 code says:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * If the user really wants these, then they should use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * raw syscall tracepoints with filtering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Report whether the current task is making an AArch32 (compat) syscall,
 * so the generic syscall tracer can ignore it (rationale in the comment
 * above). The decision is per-task via is_compat_task(); @regs is unused.
 */
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
/*
 * Match a syscall symbol name against a tracepoint name, skipping the
 * arch-mandated symbol prefix.
 */
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have __arm64_ prefix, we must skip it.
	 * However, as we described above, we decided to ignore compat
	 * syscalls, so we don't care about __arm64_compat_ prefix here.
	 *
	 * Derive the skip count from the prefix string itself instead of a
	 * magic '8', so the two cannot drift apart if the prefix changes.
	 */
	return !strcmp(sym + sizeof("__arm64_") - 1, name);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #endif /* ifndef __ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #endif /* __ASM_FTRACE_H */