// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

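/*
 * Per-arch libunwind ops.  These __weak pointers default to NULL and are
 * overridden by strong definitions when the corresponding libunwind
 * support is compiled in: "local" for the host architecture, and the
 * x86_32/arm64 variants for cross-architecture (remote) unwinding.
 */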
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

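/* Attach the chosen unwind ops to this address space (struct maps). */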
static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
{
	maps->unwind_libunwind_ops = ops;
}

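/*
 * Select the unwind ops for @maps and let them prepare their unwind
 * address space.  The default is the local (host) ops; for an x86 target
 * with a non-64-bit DSO the x86_32 ops are used, and for an arm/arm64
 * target with a 64-bit DSO the arm64 ops are used.  Does nothing if DWARF
 * callchains were not requested or if the ops are already set up.
 * @initialized, if non-NULL, tells the caller whether the unwind address
 * space is ready to use.
 */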
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
	const char *arch;
	enum dso_type dso_type;
	struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
	int err;

	if (!dwarf_callchain_users)
		return 0;

	if (maps->addr_space) {
		pr_debug("unwind: thread map already set, dso=%s\n",
			 map->dso->name);
		if (initialized)
			*initialized = true;
		return 0;
	}

	/* env->arch is NULL for live-mode (i.e. perf top) */
	if (!maps->machine->env || !maps->machine->env->arch)
		goto out_register;

	dso_type = dso__type(map->dso, maps->machine);
	if (dso_type == DSO__TYPE_UNKNOWN)
		return 0;

	arch = perf_env__arch(maps->machine->env);

	if (!strcmp(arch, "x86")) {
		if (dso_type != DSO__TYPE_64BIT)
			ops = x86_32_unwind_libunwind_ops;
	} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
		if (dso_type == DSO__TYPE_64BIT)
			ops = arm64_unwind_libunwind_ops;
	}

	if (!ops) {
		pr_err("unwind: target platform=%s is not supported\n", arch);
		return 0;
	}
out_register:
	unwind__register_ops(maps, ops);

	err = maps->unwind_libunwind_ops->prepare_access(maps);
	if (initialized)
		*initialized = err ? false : true;
	return err;
}

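/* Flush any cached unwind state for this address space, if ops are registered. */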
void unwind__flush_access(struct maps *maps)
{
	if (maps->unwind_libunwind_ops)
		maps->unwind_libunwind_ops->flush_access(maps);
}

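/* Release what unwind__prepare_access() set up, if ops are registered. */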
void unwind__finish_access(struct maps *maps)
{
	if (maps->unwind_libunwind_ops)
		maps->unwind_libunwind_ops->finish_access(maps);
}

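/*
 * Unwind the DWARF callchain for one sample (@data) on @thread, calling
 * @cb for each resolved entry, up to @max_stack entries.  Quietly does
 * nothing if no unwind ops were registered for the thread's maps.
 */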
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack)
{
	if (thread->maps->unwind_libunwind_ops)
		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
	return 0;
}