/**
 * @file backtrace.c
 *
 * @remark Copyright 2004 Silicon Graphics Inc. All Rights Reserved.
 * @remark Read the file COPYING
 *
 * @author Greg Banks <gnb@melbourne.sgi.com>
 * @author Keith Owens <kaos@melbourne.sgi.com>
 * Based on work done for the ia64 port of the SGI kernprof patch, which is
 * Copyright (c) 2003-2004 Silicon Graphics Inc. All Rights Reserved.
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/ptrace.h>

/*
 * For IA64 we need to perform a complex little dance to get both
 * the struct pt_regs and a synthetic struct switch_stack in place
 * to allow the unwind code to work. This dance requires our
 * unwind-using code to be called from a function called from
 * unw_init_running(). There we only get a single void * data pointer,
 * so use this struct to hold all the data we need during the unwind.
 */
typedef struct
{
	unsigned int depth;
	struct pt_regs *regs;
	struct unw_frame_info frame;
	unsigned long *prev_pfs_loc;	/* state for WAR for old spinlock ool code */
} ia64_backtrace_t;
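
/*
 * ia64_backtrace() below fills in an ia64_backtrace_t and hands it to
 * unw_init_running(), which passes it through to do_ia64_backtrace()
 * as the opaque void * argument.
 */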

/* Returns non-zero if the PC is in the Interrupt Vector Table */
static __inline__ int in_ivt_code(unsigned long pc)
{
	extern char ia64_ivt[];
	/* the ia64 IVT is 32KB long */
	return (pc >= (u_long)ia64_ivt && pc < (u_long)ia64_ivt + 32768);
}

/*
 * Unwind to next stack frame.
 */
static __inline__ int next_frame(ia64_backtrace_t *bt)
{
	/*
	 * Avoid unsightly console message from unw_unwind() when attempting
	 * to unwind through the Interrupt Vector Table which has no unwind
	 * information.
	 */
	if (in_ivt_code(bt->frame.ip))
		return 0;

	/*
	 * WAR for spinlock contention from leaf functions.
	 * ia64_spinlock_contention_pre3_4 has ar.pfs == r0. Leaf functions
	 * do not modify ar.pfs, so ar.pfs remains 0, stopping the backtrace.
	 * Record the previous ar.pfs when the current IP is in
	 * ia64_spinlock_contention_pre3_4, then unwind; if pfs_loc has not
	 * changed after the unwind, use pt_regs.ar_pfs, which is where the
	 * real ar.pfs is for leaf functions.
	 */
	if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc)
		bt->frame.pfs_loc = &bt->regs->ar_pfs;
	bt->prev_pfs_loc = NULL;

	return unw_unwind(&bt->frame) == 0;
}

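/*
 * Callback run by unw_init_running(): info describes the frame that
 * unw_init_running() set up for us, and vdata is the ia64_backtrace_t
 * prepared by ia64_backtrace() below.
 */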
static void do_ia64_backtrace(struct unw_frame_info *info, void *vdata)
{
	ia64_backtrace_t *bt = vdata;
	struct switch_stack *sw;
	int count = 0;
	u_long pc, sp;

	sw = (struct switch_stack *)(info+1);
	/*
	 * unw_init_running() places the struct switch_stack just after the
	 * frame info, padded up to a 16-byte boundary; round up to find it.
	 */
	sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15);

	unw_init_frame_info(&bt->frame, current, sw);

	/* skip over interrupt frame and oprofile calls */
	do {
		unw_get_sp(&bt->frame, &sp);
		if (sp >= (u_long)bt->regs)
			break;
		if (!next_frame(bt))
			return;
	} while (count++ < 200);

	/* finally, grab the actual sample */
	while (bt->depth-- && next_frame(bt)) {
		unw_get_ip(&bt->frame, &pc);
		oprofile_add_trace(pc);
		if (unw_is_intr_frame(&bt->frame)) {
			/*
			 * Interrupt received on the kernel stack; this can
			 * happen when a timer interrupt fires while processing
			 * a softirq from the tail end of a hardware interrupt
			 * which interrupted a system call. Don't laugh, it
			 * happens! Splice the backtrace into two parts to
			 * avoid spurious cycles in the gprof output.
			 */
			/* TODO: split rather than drop the 2nd half */
			break;
		}
	}
}

void
ia64_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	ia64_backtrace_t bt;
	unsigned long flags;

	/*
	 * On IA64 there is little hope of getting backtraces from
	 * user space programs -- the problems of getting the unwind
	 * information from arbitrary user programs are extreme.
	 */
	if (user_mode(regs))
		return;

	bt.depth = depth;
	bt.regs = regs;
	bt.prev_pfs_loc = NULL;
	local_irq_save(flags);
	unw_init_running(do_ia64_backtrace, &bt);
	local_irq_restore(flags);
}
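
/*
 * For context only: ia64_backtrace() is not called from this file; the
 * architecture init code (arch/ia64/oprofile/init.c) is expected to plug it
 * into struct oprofile_operations.  A minimal sketch of that hookup is shown
 * below under #if 0 -- it illustrates the ops->backtrace contract, not the
 * exact contents of init.c; in particular the perfmon_init() call and its
 * error handling are assumptions about how that file is arranged.
 */
#if 0	/* illustrative sketch, not compiled */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	int ret = -ENODEV;

#ifdef CONFIG_PERFMON
	/* hardware counter support on ia64 comes from perfmon */
	ret = perfmon_init(ops);
#endif
	/* let the core oprofile driver use the unwinder in this file */
	ops->backtrace = ia64_backtrace;

	return ret;
}
#endif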