^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef __UNWIND_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define __UNWIND_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "util/map_symbol.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
/*
 * Forward declarations: this header only traffics in pointers to these
 * types, so the full definitions are not required here.
 *
 * struct map is added explicitly: the prototypes below (e.g.
 * unwind__prepare_access()) take a struct map *, and previously the
 * declaration was only picked up transitively through util/map_symbol.h.
 */
struct map;
struct maps;
struct perf_sample;
struct thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
/*
 * One unwound callchain frame: the instruction pointer plus the
 * map/symbol it resolved to (struct map_symbol from util/map_symbol.h).
 */
struct unwind_entry {
	struct map_symbol ms;
	u64 ip;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
/*
 * Operations vtable for a DWARF unwinder backend: per-maps state setup,
 * flush and teardown, plus callchain enumeration for one sample.
 * Layout must stay stable — backends define instances of this struct.
 */
struct unwind_libunwind_ops {
	int (*prepare_access)(struct maps *maps);
	void (*flush_access)(struct maps *maps);
	void (*finish_access)(struct maps *maps);
	int (*get_entries)(unwind_entry_cb_t cb, void *arg,
			   struct thread *thread,
			   struct perf_sample *data, int max_stack);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #ifdef HAVE_DWARF_UNWIND_SUPPORT
/*
 * Unwind @thread's callchain for one @data sample, calling @cb once per
 * frame, limited to @max_stack entries.
 * NOTE(review): return convention (0 on success, negative on error) is
 * set by the backend implementation — confirm in unwind-libunwind-local.c.
 */
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack);
/* libunwind specific */
#ifdef HAVE_LIBUNWIND_SUPPORT
/*
 * Maps a libunwind register number to the perf PERF_REG_* id.  An arch
 * may provide its own mapping by defining LIBUNWIND__ARCH_REG_ID before
 * this header is included; otherwise the generic helper is used.
 */
#ifndef LIBUNWIND__ARCH_REG_ID
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
#endif

/* Stack-pointer register id; arch code may override. */
#ifndef LIBUNWIND__ARCH_REG_SP
#define LIBUNWIND__ARCH_REG_SP PERF_REG_SP
#endif

/* Instruction-pointer register id; arch code may override. */
#ifndef LIBUNWIND__ARCH_REG_IP
#define LIBUNWIND__ARCH_REG_IP PERF_REG_IP
#endif

int LIBUNWIND__ARCH_REG_ID(int regnum);
/*
 * Set up, flush, and tear down per-maps unwind state.  On success,
 * prepare_access sets *initialized to reflect whether state was created
 * for this address space — NOTE(review): exact *initialized semantics
 * live in unwind-libunwind.c; confirm there.
 */
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
void unwind__flush_access(struct maps *maps);
void unwind__finish_access(struct maps *maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct map *map __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) bool *initialized __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
/* libunwind support compiled out: nothing to flush or release. */
static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) void *arg __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) struct thread *thread __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct perf_sample *data __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) int max_stack __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct map *map __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) bool *initialized __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
/* DWARF unwind support compiled out: nothing to flush or release. */
static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #endif /* HAVE_DWARF_UNWIND_SUPPORT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #endif /* __UNWIND_H */