Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;
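
/*
 * Note: the pools above are what let the unwinder run in atomic
 * context. A minimal sketch of the intended setup (the real init code
 * lives further down this file, outside this excerpt):
 *
 *	dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
 *						    dwarf_frame_cachep);
 *
 * mempool_create_slab_pool() pre-reserves DWARF_FRAME_MIN_REQ objects,
 * so a GFP_ATOMIC mempool_alloc() during an out-of-memory backtrace
 * can still satisfy at least two frames of four registers each.
 */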

static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);

static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

static unsigned int dwarf_unwinder_ready;

/**
 *	dwarf_frame_alloc_reg - allocate memory for a DWARF register
 *	@frame: the DWARF frame whose list of registers we insert on
 *	@reg_num: the register number
 *
 *	Allocate space for, and initialise, a dwarf reg from
 *	dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 *	dwarf registers for @frame.
 *
 *	Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 *	dwarf_frame_reg - return a DWARF register
 *	@frame: the DWARF frame to search in for @reg_num
 *	@reg_num: the register number to search for
 *
 *	Lookup and return the dwarf reg @reg_num for this frame. Return
 *	NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 *	dwarf_read_addr - read dwarf data
 *	@src: source address of data
 *	@dst: destination address to store the data to
 *
 *	Read 'n' bytes from @src, where 'n' is the size of an address on
 *	the native machine. We return the number of bytes read, which
 *	should always be 'n'. We also have to be careful when reading
 *	from @src and writing to @dst, because they can be arbitrarily
 *	aligned.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}
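
/*
 * Note that dwarf_read_addr() reads a u32 yet returns
 * sizeof(unsigned long *); the two only agree because this unwinder
 * targets a 32-bit machine, where a native address is four bytes. A
 * 64-bit port would have to change the read width and the return
 * value together.
 */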

/**
 *	dwarf_read_uleb128 - read unsigned LEB128 data
 *	@addr: the address where the ULEB128 data is stored
 *	@ret: address to store the result
 *
 *	Decode an unsigned LEB128 encoded datum. The algorithm is taken
 *	from Appendix C of the DWARF 3 spec. For information on the
 *	encodings refer to section "7.6 - Variable Length Data". Return
 *	the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}
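
/*
 * Worked example (from the DWARF spec): the byte sequence
 * 0xe5 0x8e 0x26 decodes as
 *
 *	0xe5 -> low 7 bits 0x65, continuation bit set
 *	0x8e -> low 7 bits 0x0e, continuation bit set
 *	0x26 -> low 7 bits 0x26, continuation bit clear
 *
 * result = 0x65 | (0x0e << 7) | (0x26 << 14) = 624485, count = 3.
 */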

/**
 *	dwarf_read_leb128 - read signed LEB128 data
 *	@addr: the address of the LEB128 encoded data
 *	@ret: address to store the result
 *
 *	Decode signed LEB128 data. The algorithm is taken from Appendix
 *	C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}
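
/*
 * Worked example (also from the DWARF spec): 0x9b 0xf1 0x59 decodes
 * to -624485. After the loop, result holds the 21 payload bits and
 * shift is 21; because bit 6 of the final byte is set (0x59 & 0x40),
 * the value is negative, and the (-1 << shift) step fills bits 21..31
 * with ones to sign-extend it.
 */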

/**
 *	dwarf_read_encoded_value - return the decoded value at @addr
 *	@addr: the address of the encoded value
 *	@val: where to write the decoded value
 *	@encoding: the encoding with which we can decode @addr
 *
 *	GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 *	the value at @addr using @encoding. The decoded value is written
 *	to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}
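
/*
 * The encoding byte splits into two nibbles: the high nibble says how
 * the value is applied (DW_EH_PE_absptr = use as-is, DW_EH_PE_pcrel =
 * relative to the location being read), the low nibble gives the
 * storage format (e.g. DW_EH_PE_udata4/DW_EH_PE_sdata4 for 4-byte
 * data). So an FDE initial_location with encoding
 * DW_EH_PE_pcrel | DW_EH_PE_sdata4 is recovered as "address of the
 * field, plus the 32-bit value stored there" - exactly the
 * pcrel-then-4-byte path handled above.
 */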

/**
 *	dwarf_entry_len - return the length of an FDE or CIE
 *	@addr: the address of the entry
 *	@len: the length of the entry
 *
 *	Read the initial_length field of the entry and store the size of
 *	the entry in @len. We return the number of bytes read. Return a
 *	count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field. Note the cast binds
		 * to addr before the addition: we want 4 bytes past
		 * addr, not 4 u64s past it.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}
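
/*
 * Concretely: 32-bit initial length values 0xfffffff0..0xffffffff are
 * reserved. 0xffffffff (DW_EXT_DWARF64) marks a DWARF64 entry whose
 * real length is the following u64, so the header costs 12 bytes; any
 * other reserved value is an extension we cannot parse, hence the
 * 0 "error" count.
 */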

/**
 *	dwarf_lookup_cie - locate the CIE
 *	@cie_ptr: pointer to help with lookup
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct dwarf_cie *cie = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
		BUG_ON(!cie_tmp);

		if (cie_ptr == cie_tmp->cie_pointer) {
			cie = cie_tmp;
			cached_cie = cie_tmp;
			goto out;
		} else {
			if (cie_ptr < cie_tmp->cie_pointer)
				rb_node = &(*rb_node)->rb_left;
			else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 *	dwarf_lookup_fde - locate the FDE that covers pc
 *	@pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct dwarf_fde *fde = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
		BUG_ON(!fde_tmp);

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		if (pc < tmp_start) {
			rb_node = &(*rb_node)->rb_left;
		} else {
			if (pc < tmp_end) {
				fde = fde_tmp;
				goto out;
			} else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}
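
/*
 * Each FDE covers the half-open range [initial_location,
 * initial_location + address_range). As an illustration (addresses
 * hypothetical): an FDE with initial_location 0x8c001000 and
 * address_range 0x80 is the match for pc == 0x8c001040, and the
 * rb-tree walk above finds it by ordinary interval comparison -
 * left when pc is below the range, right when pc is at or beyond
 * its end.
 */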

/**
 *	dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 *	@insn_start: address of the first instruction
 *	@insn_end: address one past the last instruction
 *	@cie: the CIE for this function
 *	@fde: the FDE for this function
 *	@frame: the instructions calculate the CFA for this frame
 *	@pc: the program counter of the address we're interested in
 *
 *	Execute the Call Frame instruction sequence starting at
 *	@insn_start and ending at @insn_end. The instructions describe
 *	how to calculate the Canonical Frame Address of a stackframe.
 *	Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			/* Consume the offset operand too, so the next
			 * opcode isn't read from inside it. */
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			/* Consume the offset operand too. */
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}
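
/*
 * A sketch of what these opcodes look like in practice (register
 * numbers and factors are illustrative, not taken from this file):
 * for a function whose prologue does "sub sp, 8; store ra at sp+4",
 * the FDE program might read
 *
 *	DW_CFA_advance_loc(2)		frame->pc += 2 * code_align
 *	DW_CFA_def_cfa(r15, 8)		CFA = r15 + 8
 *	DW_CFA_offset(ra, 1)		ra saved at CFA + 1 *
 *					data_alignment_factor (e.g. -4)
 *
 * Execution stops as soon as frame->pc passes the pc we care about,
 * so only the rules in effect at that point are recorded.
 */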

/**
 *	dwarf_free_frame - free the memory allocated for @frame
 *	@frame: the frame to free
 */
void dwarf_free_frame(struct dwarf_frame *frame)
{
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
}

extern void ret_from_irq(void);

/**
 *	dwarf_unwind_stack - unwind the stack
 *
 *	@pc: address of the function to unwind
 *	@prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 *	Return a struct dwarf_frame representing the most recent frame
 *	on the callstack. Each of the lower (older) stack frames are
 *	linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If we've been called before initialization has completed,
	 * bail out immediately.
	 */
	if (!dwarf_unwinder_ready)
		return NULL;

	/*
	 * If we're starting at the top of the stack we need to get the
	 * contents of a physical register to get the CFA in order to
	 * begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be set up by the
	 * time this function makes its first function call.
	 */
	if (!pc || !prev)
		pc = _THIS_IP_;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * If our stack has been patched by the function graph tracer
	 * then we might see the address of return_to_handler() where we
	 * expected to find the real return address.
	 */
	if (pc == (unsigned long)&return_to_handler) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack)
			pc = ret_stack->ret;
		/*
		 * We currently have no way of tracking how many
		 * return_to_handler()'s we've seen. If there is more
		 * than one patched return address on our stack,
		 * complain loudly.
		 */
		WARN_ON(ftrace_graph_get_ret_stack(current, 1));
	}
#endif

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path. There are two reasons
		 * why we might exit here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	we don't know how to unwind this frame. This is
		 *	usually the case when we're trying to unwind a
		 *	frame that was called from some assembly code
		 *	that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the DWARF info for pc is bogus. There's
		 *	really no way to distinguish this case from the
		 *	case above, which sucks because we could print a
		 *	warning here.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, we're starting from the top of the
			 * stack. We need to physically read
			 * the contents of a register in order to get
			 * the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	/*
	 * Ah, the joys of unwinding through interrupts.
	 *
	 * Interrupts are tricky - the DWARF info needs to be _really_
	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
	 * info. For example, I've seen interrupts occur in epilogues
	 * just after the frame pointer (r14) had been restored. The
	 * problem was that the DWARF info claimed that the CFA could be
	 * reached by using the value of the frame pointer before it was
	 * restored.
	 *
	 * So until the compiler can be trusted to produce reliable
	 * DWARF info when it really matters, let's stop unwinding once
	 * we've calculated the function that was interrupted.
	 */
	if (prev && prev->pc == (unsigned long)ret_from_irq)
		frame->return_addr = 0;

	return frame;

bail:
	dwarf_free_frame(frame);
	return NULL;
}
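
/*
 * Typical use - a minimal sketch of how a caller might walk the
 * chain (the in-tree consumer is the unwinder dump callback further
 * down this file, outside this excerpt):
 *
 *	struct dwarf_frame *frame, *prev = NULL;
 *
 *	frame = dwarf_unwind_stack(0, NULL);
 *	while (frame && frame->return_addr) {
 *		printk("%pS\n", (void *)frame->return_addr);
 *		prev = frame;
 *		frame = dwarf_unwind_stack(prev->return_addr, prev);
 *		dwarf_free_frame(prev);
 *	}
 *	if (frame)
 *		dwarf_free_frame(frame);
 *
 * Every frame handed out here must eventually be released with
 * dwarf_free_frame().
 */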

static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	while (*cie->augmentation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		 * "L" indicates a byte showing how the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		 * LSDA pointer is encoded. Skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		if (*cie->augmentation == 'L') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			cie->augmentation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		} else if (*cie->augmentation == 'R') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			 * "R" indicates a byte showing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			 * how FDE addresses are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			 * encoded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			cie->encoding = *(char *)p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			cie->augmentation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		} else if (*cie->augmentation == 'P') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			 * "P" indicates a personality
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			 * routine in the CIE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			 * augmentation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			UNWINDER_BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		} else if (*cie->augmentation == 'S') {
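^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			 * "S" marks a signal frame; unwinding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			 * through signal frames isn't supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			 * here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			 */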
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			UNWINDER_BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			 * Unknown augmentation. Fall back on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			 * 'z' augmentation length and skip straight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			 * to the initial instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			p = cie->initial_instructions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			UNWINDER_BUG_ON(!p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	cie->initial_instructions = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	cie->instructions_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	/* Add to the rb-tree, and the owning module's CIE list if any. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	spin_lock_irqsave(&dwarf_cie_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	while (*rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		struct dwarf_cie *cie_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		parent = *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		if (cie->cie_pointer < cie_tmp->cie_pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 			rb_node = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		else if (cie->cie_pointer > cie_tmp->cie_pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			rb_node = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		else
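^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			/* Duplicate CIEs should never happen. */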
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	rb_link_node(&cie->node, parent, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	rb_insert_color(&cie->node, &cie_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (mod != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		list_add_tail(&cie->link, &mod->arch.cie_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) static int dwarf_parse_fde(void *entry, u32 entry_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			   void *start, unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			   unsigned char *end, struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	struct rb_node **rb_node = &fde_root.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct rb_node *parent = *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	struct dwarf_fde *fde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	struct dwarf_cie *cie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	void *p = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (!fde)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	fde->length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	 * In a .eh_frame section the CIE pointer is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	 * delta between the address of the CIE pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	 * field within the FDE and the CIE itself. 'p'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	 * points just past that 4-byte field, so the CIE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	 * lives at (p - 4) - entry_type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	fde->cie_pointer = (unsigned long)(p - entry_type - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	cie = dwarf_lookup_cie(fde->cie_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (!cie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		/* An FDE with no matching CIE is unusable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		kfree(fde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	fde->cie = cie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	if (cie->encoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		count = dwarf_read_encoded_value(p, &fde->initial_location,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 						 cie->encoding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		count = dwarf_read_addr(p, &fde->initial_location);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	p += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
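^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 * The address range is a size rather than a pointer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 * so only the value format in the low nibble of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 * encoding applies; the application bits (e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 * DW_EH_PE_pcrel) are masked off with 0x0f.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 */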
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (cie->encoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		count = dwarf_read_encoded_value(p, &fde->address_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 						 cie->encoding & 0x0f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		count = dwarf_read_addr(p, &fde->address_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	p += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
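^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	 * Skip over any augmentation data (e.g. an LSDA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	 * pointer); the ULEB128 length says how many bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	 * follow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	 */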
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		unsigned int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		count = dwarf_read_uleb128(p, &length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		p += count + length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	/* Call frame instructions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	fde->instructions = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	fde->end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/* Add to the rb-tree, and the owning module's FDE list if any. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	spin_lock_irqsave(&dwarf_fde_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	while (*rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		struct dwarf_fde *fde_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		unsigned long tmp_start, tmp_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		start = fde->initial_location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		end = fde->initial_location + fde->address_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		tmp_start = fde_tmp->initial_location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		parent = *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		if (start < tmp_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			rb_node = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		else if (start >= tmp_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			rb_node = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		else
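^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			/* Overlapping FDE address ranges. */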
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	rb_link_node(&fde->node, parent, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	rb_insert_color(&fde->node, &fde_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (mod != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		list_add_tail(&fde->link, &mod->arch.fde_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) static void dwarf_unwinder_dump(struct task_struct *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 				struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				unsigned long *sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 				const struct stacktrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	struct dwarf_frame *frame, *_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	unsigned long return_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	_frame = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	return_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		frame = dwarf_unwind_stack(return_addr, _frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		if (_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			dwarf_free_frame(_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		_frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		if (!frame || !frame->return_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return_addr = frame->return_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		ops->address(data, return_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	if (frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		dwarf_free_frame(frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
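^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  * A rating of 150 should rank this unwinder above the simple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  * stack dumper, making it the preferred unwinder once registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  */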
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) static struct unwinder dwarf_unwinder = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	.name = "dwarf-unwinder",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	.dump = dwarf_unwinder_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	.rating = 150,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) static void __init dwarf_unwinder_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	struct dwarf_fde *fde, *next_fde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct dwarf_cie *cie, *next_cie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	 * Deallocate all the memory allocated for the DWARF unwinder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 * Traverse all the FDE/CIE lists and remove and free all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * memory associated with those data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		kfree(fde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		kfree(cie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	mempool_destroy(dwarf_reg_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	mempool_destroy(dwarf_frame_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	kmem_cache_destroy(dwarf_reg_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	kmem_cache_destroy(dwarf_frame_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  *	dwarf_parse_section - parse DWARF section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  *	@eh_frame_start: start address of the .eh_frame section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  *	@eh_frame_end: end address of the .eh_frame section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  *	@mod: the module containing the section, or NULL for the kernel itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  *	Parse the information in a .eh_frame section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			       struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	u32 entry_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	void *p, *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	int count, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	unsigned long len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	unsigned int c_entries, f_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	unsigned char *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	c_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	f_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	entry = eh_frame_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	while ((char *)entry < eh_frame_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		p = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		count = dwarf_entry_len(p, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		if (count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			 * We read a bogus length field value. There is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			 * nothing we can do here apart from disabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			 * the DWARF unwinder. We can't even skip this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			 * entry and move to the next one because 'len'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			 * tells us where our next entry is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		p += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		/* initial length does not include itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		end = p + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		entry_type = get_unaligned((u32 *)p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		p += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		if (entry_type == DW_EH_FRAME_CIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			err = dwarf_parse_cie(entry, p, len, end, mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				c_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			err = dwarf_parse_fde(entry, entry_type, p, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 					      end, mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				f_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
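^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		 * 'len' excludes the initial length field itself,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		 * which is 4 bytes here (the 12-byte DWARF64 form
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		 * is not handled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		 */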
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		entry = (char *)entry + len + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	       c_entries, f_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #ifdef CONFIG_MODULES
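^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *	module_dwarf_finalize - parse a module's .eh_frame section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *	@hdr: ELF header of the module being loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *	@sechdrs: the module's ELF section headers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *	@me: the module itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *	Locate the module's .eh_frame section, if it has one, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *	parse the CIE and FDE entries it contains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  */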
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			  struct module *me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	start = end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	for (i = 1; i < hdr->e_shnum; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		/* Alloc bit cleared means "ignore it." */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		if ((sechdrs[i].sh_flags & SHF_ALLOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			start = sechdrs[i].sh_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			end = start + sechdrs[i].sh_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	/* Did we find the .eh_frame section? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (i != hdr->e_shnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		INIT_LIST_HEAD(&me->arch.cie_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		INIT_LIST_HEAD(&me->arch.fde_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		err = dwarf_parse_section((char *)start, (char *)end, me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			       me->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  *	module_dwarf_cleanup - remove FDE/CIEs associated with @mod
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  *	@mod: the module that is being unloaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  *	Remove any FDEs and CIEs from the global lists that came from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  *	@mod's .eh_frame section because @mod is being unloaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) void module_dwarf_cleanup(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	struct dwarf_fde *fde, *ftmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	struct dwarf_cie *cie, *ctmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	spin_lock_irqsave(&dwarf_cie_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		list_del(&cie->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		rb_erase(&cie->node, &cie_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		kfree(cie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	spin_lock_irqsave(&dwarf_fde_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		list_del(&fde->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		rb_erase(&fde->node, &fde_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		kfree(fde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) #endif /* CONFIG_MODULES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  *	dwarf_unwinder_init - initialise the dwarf unwinder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  *	Build the data structures describing the .eh_frame section to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  *	make it easier to look up CIE and FDE entries. Because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  *	.eh_frame section is packed as tightly as possible it is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  *	easy to look up the FDE for a given PC, so we build rb-trees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  *	of FDE and CIE entries to make searching faster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static int __init dwarf_unwinder_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
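^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	 * SLAB_PANIC makes a failure to create either cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	 * fatal, so the return values need no checking here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	 */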
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			sizeof(struct dwarf_frame), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			sizeof(struct dwarf_reg), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 						    dwarf_frame_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (!dwarf_frame_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 						  dwarf_reg_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (!dwarf_reg_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	err = unwinder_register(&dwarf_unwinder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	dwarf_unwinder_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	dwarf_unwinder_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) early_initcall(dwarf_unwinder_init);