Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/kernel/unwind.c
 *
 * Copyright (C) 2008 ARM Limited
 *
 * Stack unwinding support for ARM
 *
 * An ARM EABI version of gcc is required to generate the unwind
 * tables. For information about the structure of the unwind tables,
 * see "Exception Handling ABI for the ARM Architecture" at:
 *
 * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
 */

#ifndef __CHECKER__
#if !defined (__ARM_EABI__)
#warning Your compiler does not have EABI support.
#warning    ARM unwind is known to compile only with EABI compilers.
#warning    Change compiler or disable ARM_UNWIND option.
#elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2) && !defined(__clang__)
#warning Your compiler is too buggy; it is known to not compile ARM unwind support.
#warning    Change compiler or disable ARM_UNWIND option.
#endif
#endif /* __CHECKER__ */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <asm/unwind.h>

/*
 * Dummy functions to avoid linker complaints: compiler-generated unwind
 * entries reference the EHABI personality routines, but the kernel
 * interprets the unwind tables itself, so empty stubs are enough to
 * satisfy the references.
 */
void __aeabi_unwind_cpp_pr0(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);

void __aeabi_unwind_cpp_pr1(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);

void __aeabi_unwind_cpp_pr2(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);

struct unwind_ctrl_block {
	unsigned long vrs[16];		/* virtual register set */
	const unsigned long *insn;	/* pointer to the current instruction word */
	unsigned long sp_high;		/* highest value of sp allowed */
	/*
	 * 1 : check for stack overflow for each register pop.
	 * 0 : save overhead if there is plenty of stack remaining.
	 */
	int check_each_pop;
	int entries;			/* number of entries left to interpret */
	int byte;			/* current byte number in the instruction word */
};

enum regs {
#ifdef CONFIG_THUMB2_KERNEL
	FP = 7,			/* Thumb-2 code uses r7 as the frame pointer */
#else
	FP = 11,		/* ARM code uses r11 as the frame pointer */
#endif
	SP = 13,
	LR = 14,
	PC = 15
};

extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];

static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);

/* Convert a prel31 symbol to an absolute address */
#define prel31_to_addr(ptr)				\
({							\
	/* sign-extend to 32 bits */			\
	long offset = (((long)*(ptr)) << 1) >> 1;	\
	(unsigned long)(ptr) + offset;			\
})
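/*
 * A prel31 value is a 31-bit signed offset relative to the address of the
 * word that holds it; the macro above sign-extends bit 30 into bit 31 and
 * adds the location of the field to recover the absolute address.
 */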

/*
 * Binary search in the unwind index. The entries are
 * guaranteed to be sorted in ascending order by the linker.
 *
 * start = first entry
 * origin = first entry with positive offset (or stop if there is no such entry)
 * stop - 1 = last entry
 */
static const struct unwind_idx *search_index(unsigned long addr,
				       const struct unwind_idx *start,
				       const struct unwind_idx *origin,
				       const struct unwind_idx *stop)
{
	unsigned long addr_prel31;

	pr_debug("%s(%08lx, %p, %p, %p)\n",
			__func__, addr, start, origin, stop);

	/*
	 * Only search in the section with the matching sign. This way the
	 * prel31 numbers can be compared as unsigned longs.
	 */
	if (addr < (unsigned long)start)
		/* negative offsets: [start; origin) */
		stop = origin;
	else
		/* positive offsets: [origin; stop) */
		start = origin;

	/* prel31 for address relative to start */
	addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;

	while (start < stop - 1) {
		const struct unwind_idx *mid = start + ((stop - start) >> 1);

		/*
		 * As addr_prel31 is relative to start, an offset is needed to
		 * make it relative to mid.
		 */
		if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
				mid->addr_offset)
			stop = mid;
		else {
			/* keep addr_prel31 relative to start */
			addr_prel31 -= ((unsigned long)mid -
					(unsigned long)start);
			start = mid;
		}
	}

	if (likely(start->addr_offset <= addr_prel31))
		return start;
	else {
		pr_warn("unwind: Unknown symbol address %08lx\n", addr);
		return NULL;
	}
}

static const struct unwind_idx *unwind_find_origin(
		const struct unwind_idx *start, const struct unwind_idx *stop)
{
	pr_debug("%s(%p, %p)\n", __func__, start, stop);
	while (start < stop) {
		const struct unwind_idx *mid = start + ((stop - start) >> 1);

		if (mid->addr_offset >= 0x40000000)
			/* negative offset */
			start = mid + 1;
		else
			/* positive offset */
			stop = mid;
	}
	pr_debug("%s -> %p\n", __func__, stop);
	return stop;
}

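/*
 * Find the unwind index entry covering addr: the main kernel table for
 * core kernel text, otherwise one of the per-module tables registered via
 * unwind_table_add().
 */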
static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
	const struct unwind_idx *idx = NULL;
	unsigned long flags;

	pr_debug("%s(%08lx)\n", __func__, addr);

	if (core_kernel_text(addr)) {
		if (unlikely(!__origin_unwind_idx))
			__origin_unwind_idx =
				unwind_find_origin(__start_unwind_idx,
						__stop_unwind_idx);

		/* main unwind table */
		idx = search_index(addr, __start_unwind_idx,
				   __origin_unwind_idx,
				   __stop_unwind_idx);
	} else {
		/* module unwind tables */
		struct unwind_table *table;

		raw_spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->begin_addr &&
			    addr < table->end_addr) {
				idx = search_index(addr, table->start,
						   table->origin,
						   table->stop);
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&unwind_lock, flags);
	}

	pr_debug("%s: idx = %p\n", __func__, idx);
	return idx;
}

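/*
 * Extract the next unwind opcode byte. Opcodes are packed four to a 32-bit
 * word and consumed from the most significant byte (ctrl->byte == 3) down
 * to the least significant one (ctrl->byte == 0) before moving on to the
 * next word.
 */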
static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
{
	unsigned long ret;

	if (ctrl->entries <= 0) {
		pr_warn("unwind: Corrupt unwind table\n");
		return 0;
	}

	ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;

	if (ctrl->byte == 0) {
		ctrl->insn++;
		ctrl->entries--;
		ctrl->byte = 3;
	} else
		ctrl->byte--;

	return ret;
}

/* Before popping a register, check whether the stack access is feasible */
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
				unsigned long **vsp, unsigned int reg)
{
	if (unlikely(ctrl->check_each_pop))
		if (*vsp >= (unsigned long *)ctrl->sp_high)
			return -URC_FAILURE;

	ctrl->vrs[reg] = *(*vsp)++;
	return URC_OK;
}

/* Helper functions to execute the instructions */
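/*
 * Pop a subset of r4-r15 described by a 12-bit mask. If the mask includes
 * r13 (SP), the popped value becomes the new stack pointer and must not be
 * overwritten by the running vsp afterwards, hence the load_sp check.
 */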
static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
						unsigned long mask)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int load_sp, reg = 4;

	load_sp = mask & (1 << (13 - 4));
	while (mask) {
		if (mask & 1)
			if (unwind_pop_register(ctrl, &vsp, reg))
				return -URC_FAILURE;
		mask >>= 1;
		reg++;
	}
	if (!load_sp)
		ctrl->vrs[SP] = (unsigned long)vsp;

	return URC_OK;
}

static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
					unsigned long insn)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int reg;

	/* pop R4-R[4+bbb] */
	for (reg = 4; reg <= 4 + (insn & 7); reg++)
		if (unwind_pop_register(ctrl, &vsp, reg))
			return -URC_FAILURE;

	if (insn & 0x8)
		if (unwind_pop_register(ctrl, &vsp, 14))
			return -URC_FAILURE;

	ctrl->vrs[SP] = (unsigned long)vsp;

	return URC_OK;
}

static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
						unsigned long mask)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int reg = 0;

	/* pop R0-R3 according to mask */
	while (mask) {
		if (mask & 1)
			if (unwind_pop_register(ctrl, &vsp, reg))
				return -URC_FAILURE;
		mask >>= 1;
		reg++;
	}
	ctrl->vrs[SP] = (unsigned long)vsp;

	return URC_OK;
}

/*
 * Execute the current unwind instruction.
 */
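/*
 * Opcode encodings handled below (ARM EHABI, personality routines 0/1/2):
 *   00xxxxxx            vsp += (x << 2) + 4
 *   01xxxxxx            vsp -= (x << 2) + 4
 *   1000iiii iiiiiiii   pop {r4-r15} under a 12-bit mask (mask 0 = refuse to unwind)
 *   1001nnnn            vsp = r[nnnn]  (nnnn != 13, 15)
 *   1010lnnn            pop r4-r[4+nnn], plus r14 if l is set
 *   10110000            finish
 *   10110001 0000iiii   pop {r0-r3} under a 4-bit mask
 *   10110010 uleb128    vsp += 0x204 + (uleb128 << 2)
 */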
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
	unsigned long insn = unwind_get_byte(ctrl);
	int ret = URC_OK;

	pr_debug("%s: insn = %08lx\n", __func__, insn);

	if ((insn & 0xc0) == 0x00)
		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
	else if ((insn & 0xc0) == 0x40)
		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
	else if ((insn & 0xf0) == 0x80) {
		unsigned long mask;

		insn = (insn << 8) | unwind_get_byte(ctrl);
		mask = insn & 0x0fff;
		if (mask == 0) {
			pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n",
				insn);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
		if (ret)
			goto error;
	} else if ((insn & 0xf0) == 0x90 &&
		   (insn & 0x0d) != 0x0d)
		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
	else if ((insn & 0xf0) == 0xa0) {
		ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
		if (ret)
			goto error;
	} else if (insn == 0xb0) {
		if (ctrl->vrs[PC] == 0)
			ctrl->vrs[PC] = ctrl->vrs[LR];
		/* no further processing */
		ctrl->entries = 0;
	} else if (insn == 0xb1) {
		unsigned long mask = unwind_get_byte(ctrl);

		if (mask == 0 || mask & 0xf0) {
			pr_warn("unwind: Spare encoding %04lx\n",
				(insn << 8) | mask);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
		if (ret)
			goto error;
	} else if (insn == 0xb2) {
		unsigned long uleb128 = unwind_get_byte(ctrl);

		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
	} else {
		pr_warn("unwind: Unhandled instruction %02lx\n", insn);
		return -URC_FAILURE;
	}

	pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
		 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);

error:
	return ret;
}

/*
 * Unwind a single frame starting with frame->sp for the symbol at
 * frame->pc, updating frame->pc and frame->sp with the new values.
 */
int unwind_frame(struct stackframe *frame)
{
	unsigned long low;
	const struct unwind_idx *idx;
	struct unwind_ctrl_block ctrl;

	/* store the highest address on the stack to avoid crossing it */
	low = frame->sp;
	ctrl.sp_high = ALIGN(low, THREAD_SIZE);

	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
		 frame->pc, frame->lr, frame->sp);

	if (!kernel_text_address(frame->pc))
		return -URC_FAILURE;

	idx = unwind_find_idx(frame->pc);
	if (!idx) {
		pr_warn("unwind: Index not found %08lx\n", frame->pc);
		return -URC_FAILURE;
	}

	ctrl.vrs[FP] = frame->fp;
	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	ctrl.vrs[PC] = 0;

	if (idx->insn == 1)
		/* can't unwind */
		return -URC_FAILURE;
	else if ((idx->insn & 0x80000000) == 0)
		/* prel31 to the unwind table */
		ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
	else if ((idx->insn & 0xff000000) == 0x80000000)
		/* only personality routine 0 supported in the index */
		ctrl.insn = &idx->insn;
	else {
		pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n",
			idx->insn, idx);
		return -URC_FAILURE;
	}

	/* check the personality routine */
	if ((*ctrl.insn & 0xff000000) == 0x80000000) {
		/* short format: up to three opcode bytes in this word */
		ctrl.byte = 2;
		ctrl.entries = 1;
	} else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
		/* long format: the second byte gives the count of extra words */
		ctrl.byte = 1;
		ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
	} else {
		pr_warn("unwind: Unsupported personality routine %08lx at %p\n",
			*ctrl.insn, ctrl.insn);
		return -URC_FAILURE;
	}

	ctrl.check_each_pop = 0;

	while (ctrl.entries > 0) {
		int urc;
		if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
			ctrl.check_each_pop = 1;
		urc = unwind_exec_insn(&ctrl);
		if (urc < 0)
			return urc;
		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high)
			return -URC_FAILURE;
	}

	if (ctrl.vrs[PC] == 0)
		ctrl.vrs[PC] = ctrl.vrs[LR];

	/* check for infinite loop */
	if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
		return -URC_FAILURE;

	frame->fp = ctrl.vrs[FP];
	frame->sp = ctrl.vrs[SP];
	frame->lr = ctrl.vrs[LR];
	frame->pc = ctrl.vrs[PC];

	return URC_OK;
}

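/*
 * Walk and print the call stack for tsk. The starting frame comes from
 * regs when provided, from the current register state when dumping the
 * running task, or from the context saved by __switch_to() for a task
 * that is not running.
 */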
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		      const char *loglvl)
{
	struct stackframe frame;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (regs) {
		arm_get_current_stackframe(regs, &frame);
		/* PC might be corrupted, use LR in that case. */
		if (!kernel_text_address(regs->ARM_pc))
			frame.pc = regs->ARM_lr;
	} else if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)unwind_backtrace;
	} else {
		/* task blocked in __switch_to */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		/*
		 * The function calling __switch_to cannot be a leaf function
		 * so LR is recovered from the stack.
		 */
		frame.lr = 0;
		frame.pc = thread_saved_pc(tsk);
	}

	while (1) {
		int urc;
		unsigned long where = frame.pc;

		urc = unwind_frame(&frame);
		if (urc < 0)
			break;
		dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
	}
}

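/*
 * Register an .ARM.exidx unwind index section covering [text_addr,
 * text_addr + text_size); typically called by the arch module loader when
 * a module is loaded. unwind_table_del() removes the table again on unload.
 */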
struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
				      unsigned long text_addr,
				      unsigned long text_size)
{
	unsigned long flags;
	struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);

	pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
		 text_addr, text_size);

	if (!tab)
		return tab;

	tab->start = (const struct unwind_idx *)start;
	tab->stop = (const struct unwind_idx *)(start + size);
	tab->origin = unwind_find_origin(tab->start, tab->stop);
	tab->begin_addr = text_addr;
	tab->end_addr = text_addr + text_size;

	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&tab->list, &unwind_tables);
	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	return tab;
}

void unwind_table_del(struct unwind_table *tab)
{
	unsigned long flags;

	if (!tab)
		return;

	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_del(&tab->list);
	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(tab);
}