// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/perf_event.h>
#include <linux/uaccess.h>

/* Kernel callchain */
struct stackframe {
	unsigned long fp;
	unsigned long lr;
};

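/*
 * Walk one step up the kernel stack: check that the current frame
 * pointer lies inside the task's stack before dereferencing it, then
 * load the caller's {fp, lr} pair.  ftrace_graph_ret_addr() recovers
 * the real return address when the function graph tracer has patched
 * the saved lr.
 */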
static int unwind_frame_kernel(struct stackframe *frame)
{
	unsigned long low = (unsigned long)task_stack_page(current);
	unsigned long high = low + THREAD_SIZE;

	if (unlikely(frame->fp < low || frame->fp > high))
		return -EPERM;

	if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
		return -EPERM;

	*frame = *(struct stackframe *)frame->fp;

	if (__kernel_text_address(frame->lr)) {
		int graph = 0;

		frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr,
				NULL);
	}
	return 0;
}

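/*
 * Record the return address of every frame on the kernel stack until
 * unwind_frame_kernel() rejects a frame.  perf_callchain_store() itself
 * stops accepting entries once the callchain is full.
 */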
static void notrace walk_stackframe(struct stackframe *fr,
				    struct perf_callchain_entry_ctx *entry)
{
	do {
		perf_callchain_store(entry, fr->lr);
	} while (unwind_frame_kernel(fr) >= 0);
}

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
				    unsigned long fp, unsigned long reg_lr)
{
	struct stackframe buftail;
	unsigned long lr = 0;
	unsigned long __user *user_frame_tail = (unsigned long __user *)fp;

	/* Check accessibility of one struct frame_tail beyond */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;
	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
				      sizeof(buftail)))
		return 0;

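	/*
	 * For the first (leaf) frame the caller passes in lr from pt_regs,
	 * because a leaf function may not have saved lr in its frame yet.
	 * Deeper frames use the lr stored in the frame itself.
	 */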
	if (reg_lr != 0)
		lr = reg_lr;
	else
		lr = buftail.lr;

	fp = buftail.fp;
	perf_callchain_store(entry, lr);

	return fp;
}

/*
 * This will be called when the target is in user mode.
 * This function will only be called when we use
 * "PERF_SAMPLE_CALLCHAIN" in
 * kernel/events/core.c:perf_prepare_sample()
 *
 * How to trigger perf_callchain_[user/kernel]:
 * $ perf record -e cpu-clock --call-graph fp ./program
 * $ perf report --call-graph
 *
 * On the C-SKY platform, the program being sampled and the C library
 * need to be compiled with -mbacktrace, otherwise the user stack
 * will not contain function frames.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
	unsigned long fp = 0;

	/* C-SKY does not support virtualization. */
	if (guest_cbs && guest_cbs->is_in_guest())
		return;

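	/* regs[4] holds the frame pointer register at the sample point. */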
	fp = regs->regs[4];
	perf_callchain_store(entry, regs->pc);

	/*
	 * When backtracing from a leaf function, lr is normally not
	 * saved in the frame on C-SKY, so take lr from pt_regs at the
	 * sample point.  However, the lr value can be incorrect if lr
	 * is being used as a temporary register.
	 */
	fp = user_backtrace(entry, fp, regs->lr);

	while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
		fp = user_backtrace(entry, fp, 0);
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
	struct stackframe fr;

	/* C-SKY does not support virtualization. */
	if (guest_cbs && guest_cbs->is_in_guest()) {
		pr_warn("C-SKY does not support perf in guest mode!\n");
		return;
	}

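	/* Seed the unwinder with the frame pointer and lr at the sample point. */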
	fr.fp = regs->regs[4];
	fr.lr = regs->lr;
	walk_stackframe(&fr, entry);
}