Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

arch/sh/kernel/unwinder.c (all lines: commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009  Matt Fleming
 *
 * Based, in part, on kernel/time/clocksource.c.
 *
 * This file provides arbitration code for stack unwinders.
 *
 * Multiple stack unwinders can be available on a system, usually with
 * the most accurate unwinder being the currently active one.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <linux/atomic.h>

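/*
 * For reference, a sketch of the ops interface this file arbitrates
 * over. The authoritative definition lives in <asm/unwinder.h>; the
 * shape below is inferred from how the fields are used in this file,
 * and is kept inside a comment so it cannot clash with that header:
 *
 *	struct unwinder {
 *		const char *name;
 *		struct list_head list;
 *		int rating;
 *		void (*dump)(struct task_struct *task, struct pt_regs *regs,
 *			     unsigned long *sp,
 *			     const struct stacktrace_ops *ops, void *data);
 *	};
 */
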
/*
 * This is the most basic stack unwinder an architecture can
 * provide. For architectures without reliable frame pointers, e.g.
 * RISC CPUs, it can be implemented by looking through the stack for
 * addresses that lie within the kernel text section.
 *
 * Other CPUs, e.g. x86, can use their frame pointer register to
 * construct more accurate stack traces.
 */
static struct list_head unwinder_list;
static struct unwinder stack_reader = {
	.name = "stack-reader",
	.dump = stack_reader_dump,
	.rating = 50,
	.list = {
		.next = &unwinder_list,
		.prev = &unwinder_list,
	},
};

/*
 * "curr_unwinder" points to the stack unwinder currently in use. This
 * is the unwinder with the highest rating.
 *
 * "unwinder_list" is a linked-list of all available unwinders, sorted
 * by rating.
 *
 * All modifications of "curr_unwinder" and "unwinder_list" must be
 * performed whilst holding "unwinder_lock".
 */
static struct unwinder *curr_unwinder = &stack_reader;

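/*
 * Note the two-step setup of "unwinder_list": the tentative
 * declaration further up lets stack_reader's list node point at the
 * list head, and the initialised definition below makes stack_reader
 * the sole initial element. The net effect is that the fallback
 * unwinder is always registered before any unwinder_register() call.
 */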
static struct list_head unwinder_list = {
	.next = &stack_reader.list,
	.prev = &stack_reader.list,
};

static DEFINE_SPINLOCK(unwinder_lock);

/**
 * select_unwinder - Select the best registered stack unwinder.
 *
 * Private function. Must hold unwinder_lock when called.
 *
 * Select the stack unwinder with the best rating. This is useful for
 * setting up curr_unwinder.
 *
 * Returns NULL if the list is empty or if the best-rated unwinder is
 * already curr_unwinder; callers must treat NULL as "no change".
 */
static struct unwinder *select_unwinder(void)
{
	struct unwinder *best;

	if (list_empty(&unwinder_list))
		return NULL;

	best = list_entry(unwinder_list.next, struct unwinder, list);
	if (best == curr_unwinder)
		return NULL;

	return best;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
/*
 * Enqueue the stack unwinder sorted by rating, highest first.
 */
static int unwinder_enqueue(struct unwinder *ops)
{
	struct list_head *tmp, *entry = &unwinder_list;

	list_for_each(tmp, &unwinder_list) {
		struct unwinder *o;

		o = list_entry(tmp, struct unwinder, list);
		if (o == ops)
			return -EBUSY;
		/* Keep track of where to insert */
		if (o->rating >= ops->rating)
			entry = tmp;
	}
	list_add(&ops->list, entry);

	return 0;
}
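
/*
 * Worked example (not part of the original file): with the list
 * holding ratings {150, 100, 50} and a new unwinder rated 120, only
 * the 150-rated node satisfies "o->rating >= ops->rating", so "entry"
 * ends up pointing at it and list_add() places the newcomer right
 * after it, giving {150, 120, 100, 50}. The list thus stays sorted in
 * descending rating order, and select_unwinder() can simply take the
 * head.
 */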
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
/**
 * unwinder_register - Used to install new stack unwinder
 * @u: unwinder to be registered
 *
 * Install the new stack unwinder on the unwinder list, which is sorted
 * by rating.
 *
 * Returns -EBUSY if the unwinder is already registered, zero otherwise.
 */
int unwinder_register(struct unwinder *u)
{
	unsigned long flags;
	struct unwinder *best;
	int ret;

	spin_lock_irqsave(&unwinder_lock, flags);
	ret = unwinder_enqueue(u);
	if (!ret) {
		/*
		 * select_unwinder() returns NULL when the best-rated
		 * unwinder is already current; don't clobber
		 * curr_unwinder with NULL in that case.
		 */
		best = select_unwinder();
		if (best)
			curr_unwinder = best;
	}
	spin_unlock_irqrestore(&unwinder_lock, flags);

	return ret;
}
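
/*
 * Illustrative sketch (not part of the original file): how an
 * architecture plugs in a better unwinder. The names, callback body
 * and rating below are hypothetical stand-ins; in the sh tree the
 * real user is the DWARF unwinder, which registers itself with a
 * rating above stack_reader's 50.
 */
static void example_dump(struct task_struct *task, struct pt_regs *regs,
			 unsigned long *sp, const struct stacktrace_ops *ops,
			 void *data)
{
	/* A real implementation walks the stack and reports each
	 * frame through the "ops" callbacks. */
}

static struct unwinder example_unwinder = {
	.name	= "example-unwinder",
	.dump	= example_dump,
	.rating	= 150,	/* outranks stack_reader, so it becomes current */
};

static int __init example_unwinder_init(void)
{
	/* -EBUSY here would mean the unwinder was already enqueued. */
	return unwinder_register(&example_unwinder);
}
early_initcall(example_unwinder_init);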
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
/* Set when curr_unwinder->dump() faults, to request a downgrade. */
int unwinder_faulted = 0;

/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new
 * stack dumper because the current one faulted unexpectedly.
 */
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
		  unsigned long *sp, const struct stacktrace_ops *ops,
		  void *data)
{
	unsigned long flags;

	/*
	 * The problem with unwinders with high ratings is that they are
	 * inherently more complicated than the simple ones with lower
	 * ratings. We are therefore more likely to fault in the
	 * complicated ones, e.g. hitting BUG()s. If we fault in the
	 * code for the current stack unwinder we try to downgrade to
	 * one with a lower rating.
	 *
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (unwinder_faulted) {
		spin_lock_irqsave(&unwinder_lock, flags);

		/* Make sure no one beat us to changing the unwinder */
		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);
			curr_unwinder = select_unwinder();

			unwinder_faulted = 0;
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);
}
EXPORT_SYMBOL_GPL(unwind_stack);
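
/*
 * Illustrative sketch (not part of the original file): how the fault
 * path above is meant to be driven. A sophisticated unwinder that
 * detects it has crashed sets "unwinder_faulted" (in this tree that
 * is wired up through the architecture's UNWINDER_BUG() handling) and
 * the next unwind_stack() call drops it from the list and retries
 * with the next-best unwinder. The helper name below is hypothetical.
 */
static inline void example_unwinder_recover(struct task_struct *task,
					    struct pt_regs *regs,
					    unsigned long *sp,
					    const struct stacktrace_ops *ops,
					    void *data)
{
	/* Flag the active unwinder as broken... */
	unwinder_faulted = 1;

	/* ...and retry: unwind_stack() downgrades before dumping. */
	unwind_stack(task, regs, sp, ops, data);
}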