Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them one after another
 * in contiguous memory allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
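
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * debugging tool captures a trace once, keeps only the 32-bit handle, and
 * expands it again when reporting. stack_trace_save() and stack_trace_print()
 * are the generic stacktrace helpers; the buffer size and GFP_NOWAIT are
 * arbitrary choices for the example.
 *
 *	unsigned long entries[16];
 *	unsigned long *saved;
 *	unsigned int nr, saved_nr;
 *	depot_stack_handle_t handle;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr, GFP_NOWAIT);
 *	if (handle) {
 *		saved_nr = stack_depot_fetch(handle, &saved);
 *		stack_trace_print(saved, saved_nr, 0);
 *	}
 */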

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
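
/*
 * Worked example of the handle packing (assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): a slab is 4 pages == 16 KiB and records are aligned to
 * 1 << STACK_ALLOC_ALIGN == 16 bytes, so the in-slab offset fits in
 * 12 + 2 - 4 == 10 bits; one bit marks the handle as valid (so a valid handle
 * is never 0), leaving 32 - 1 - 10 == 21 bits for the slab index, which is
 * further capped at STACK_ALLOC_SLABS_CAP slabs.
 */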

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
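
/*
 * Rough sizing note (an assumption based on the usual Kconfig default of
 * CONFIG_STACK_HASH_ORDER == 20): the hash table then has 1 << 20 buckets,
 * i.e. 8 MiB of bucket pointers on a 64-bit kernel, allocated once from
 * memblock in stack_depot_init() below.
 */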

static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);

int __init stack_depot_init(void)
{
	if (!stack_depot_disable) {
		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
		int i;

		stack_table = memblock_alloc(size, size);
		for (i = 0; i < STACK_HASH_SIZE;  i++)
			stack_table[i] = NULL;
	}
	return 0;
}

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size,  sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be initialized.
	 * If so, allocate the memory - we won't be able to do that under the
	 * lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
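
/*
 * Typical use of filter_irq_stacks() (illustrative sketch): callers such as
 * KASAN trim everything below the IRQ entry point before saving, so traces
 * that differ only in the interrupted task's frames dedupe to one record.
 * The variable names and gfp flags here are assumptions for the example.
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */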