Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

arch/powerpc/kernel/stacktrace.c

// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
                        struct task_struct *tsk, int savesched)
{
        for (;;) {
                unsigned long *stack = (unsigned long *) sp;
                unsigned long newsp, ip;

                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

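                /*
                 * Per the powerpc ABI, the first word of a stack frame
                 * is the back chain (the caller's SP), and the LR save
                 * slot, at word offset STACK_FRAME_LR_SAVE, holds a
                 * saved return address.
                 */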
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];

                if (savesched || !in_sched_functions(ip)) {
                        if (!trace->skip)
                                trace->entries[trace->nr_entries++] = ip;
                        else
                                trace->skip--;
                }

                if (trace->nr_entries >= trace->max_entries)
                        return;

                sp = newsp;
        }
}

void save_stack_trace(struct stack_trace *trace)
{
        unsigned long sp;

        sp = current_stack_frame();

        save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
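
/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * the classic stack_trace API this file implements. The buffer size and
 * skip count are arbitrary choices for the example.
 */
#if 0
static void example_dump_current_stack(void)
{
        unsigned long entries[16];
        struct stack_trace trace = {
                .entries        = entries,
                .max_entries    = ARRAY_SIZE(entries),
                .skip           = 1,    /* omit this helper's own frame */
        };

        save_stack_trace(&trace);
        stack_trace_print(entries, trace.nr_entries, 0);
}
#endif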

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        unsigned long sp;

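        /*
         * Pin the task's stack so it cannot be freed from under us if
         * the task exits while we are walking it.
         */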
        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current)
                sp = current_stack_frame();
        else
                sp = tsk->thread.ksp;

        save_context_stack(trace, sp, tsk, 0);

        put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
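        /* On powerpc, GPR1 is the stack pointer. */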
        save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
                                           struct stack_trace *trace)
{
        unsigned long sp;
        unsigned long newsp;
        unsigned long stack_page = (unsigned long)task_stack_page(tsk);
        unsigned long stack_end;
        int graph_idx = 0;
        bool firstframe;

        stack_end = stack_page + THREAD_SIZE;
        if (!is_idle_task(tsk)) {
                /*
                 * For user tasks, this is the SP value loaded on
                 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
                 * system_call_common()/EXCEPTION_PROLOG_COMMON().
                 *
                 * Likewise for non-swapper kernel threads,
                 * this also happens to be the top of the stack
                 * as set up by copy_thread().
                 *
                 * Note that stack backlinks are not properly set up by
                 * copy_thread(), and thus a forked task will have an
                 * unreliable stack trace until it has been _switch()'ed
                 * to for the first time.
                 */
                stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        } else {
                /*
                 * idle tasks have a custom stack layout,
                 * c.f. cpu_idle_thread_init().
                 */
                stack_end -= STACK_FRAME_OVERHEAD;
        }

        if (tsk == current)
                sp = current_stack_frame();
        else
                sp = tsk->thread.ksp;

        if (sp < stack_page + sizeof(struct thread_struct) ||
            sp > stack_end - STACK_FRAME_MIN_SIZE) {
                return -EINVAL;
        }

        for (firstframe = true; sp != stack_end;
             firstframe = false, sp = newsp) {
                unsigned long *stack = (unsigned long *) sp;
                unsigned long ip;

                /* sanity check: the ABI requires SP to be aligned to 16 bytes. */
                if (sp & 0xF)
                        return -EINVAL;

                newsp = stack[0];
                /* Stack grows downwards; unwinder may only go up. */
                if (newsp <= sp)
                        return -EINVAL;

                if (newsp != stack_end &&
                    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
                        return -EINVAL; /* invalid backlink, too far up. */
                }

                /*
                 * We can only trust the bottom frame's backlink; the
                 * rest of the frame may be uninitialized, so continue
                 * to the next frame.
                 */
                if (firstframe)
                        continue;

                /* Mark stacktraces with exception frames as unreliable. */
                if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
                    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        return -EINVAL;
                }

                /* Examine the saved LR: it must point into kernel code. */
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!__kernel_text_address(ip))
                        return -EINVAL;

                /*
                 * FIXME: IMHO these tests do not belong in
                 * arch-dependent code, they are generic.
                 */
                ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
                /*
                 * Mark stacktraces with kretprobed functions on them
                 * as unreliable.
                 */
                if (ip == (unsigned long)kretprobe_trampoline)
                        return -EINVAL;
#endif

                if (trace->nr_entries >= trace->max_entries)
                        return -E2BIG;
                if (!trace->skip)
                        trace->entries[trace->nr_entries++] = ip;
                else
                        trace->skip--;
        }
        return 0;
}

int save_stack_trace_tsk_reliable(struct task_struct *tsk,
                                  struct stack_trace *trace)
{
        int ret;

        /*
         * If the task doesn't have a stack (e.g., a zombie), the stack is
         * "reliably" empty.
         */
        if (!try_get_task_stack(tsk))
                return 0;

        ret = __save_stack_trace_tsk_reliable(tsk, trace);

        put_task_stack(tsk);

        return ret;
}
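
/*
 * Illustrative sketch, not part of the original file: a consumer such as
 * livepatch might use the reliable interface like this. The buffer size
 * is arbitrary and examine_trace() is a hypothetical helper named only
 * for the example.
 */
#if 0
static int example_check_task(struct task_struct *tsk)
{
        unsigned long entries[64];
        struct stack_trace trace = {
                .entries        = entries,
                .max_entries    = ARRAY_SIZE(entries),
        };
        int ret;

        /* The caller must ensure tsk is inactive if it is not current. */
        ret = save_stack_trace_tsk_reliable(tsk, &trace);
        if (ret < 0)
                return ret;     /* the trace cannot be trusted */

        return examine_trace(entries, trace.nr_entries);
}
#endif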
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
        nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
        struct paca_struct *p;
        unsigned int cpu;
        u64 delay_us;

        for_each_cpu(cpu, mask) {
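                /*
                 * The current CPU can backtrace itself directly; NULL
                 * regs tells nmi_cpu_backtrace() to use the current
                 * context.
                 */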
                if (cpu == smp_processor_id()) {
                        handle_backtrace_ipi(NULL);
                        continue;
                }

                delay_us = 5 * USEC_PER_SEC;

                if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
                        // Now wait up to 5s for the other CPU to do its backtrace
                        while (cpumask_test_cpu(cpu, mask) && delay_us) {
                                udelay(1);
                                delay_us--;
                        }

                        // Other CPU cleared itself from the mask
                        if (delay_us)
                                continue;
                }

                p = paca_ptrs[cpu];

                cpumask_clear_cpu(cpu, mask);

                pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
                if (!virt_addr_valid(p)) {
                        pr_warn("paca pointer appears corrupt? (%px)\n", p);
                        continue;
                }

                pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
                        p->irq_soft_mask, p->in_mce, p->in_nmi);

                if (virt_addr_valid(p->__current))
                        pr_cont(" current: %d (%s)\n", p->__current->pid,
                                p->__current->comm);
                else
                        pr_cont(" current pointer corrupt? (%px)\n", p->__current);

                pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
                show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
        }
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
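
/*
 * Illustrative sketch, not part of the original file: the hook above is
 * normally reached through the generic helpers in <linux/nmi.h> rather
 * than called directly.
 */
#if 0
static void example_dump_all_cpus(void)
{
        /* Backtrace every online CPU, including this one. */
        trigger_all_cpu_backtrace();

        /* Or every CPU except the calling one. */
        trigger_allbutself_cpu_backtrace();
}
#endif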
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */