Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * powerpc code to implement the kexec_file_load syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2004  IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Copyright (C) 2020  IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * Based on kexec-tools' kexec-ppc64.c, fs2dt.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * Heavily modified for the kernel by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * Hari Bathini, IBM Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #define pr_fmt(fmt) "kexec ranges: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/kexec_ranges.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * get_max_nr_ranges - Get the max no. of ranges crash_mem structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  *                     could hold, given the size allocated for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * @size:              Allocation size of crash_mem structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * Returns the maximum no. of ranges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) static inline unsigned int get_max_nr_ranges(size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	return ((size - sizeof(struct crash_mem)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 		sizeof(struct crash_mem_range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * get_mem_rngs_size - Get the allocated size of mem_rngs based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  *                     max_nr_ranges and chunk size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * @mem_rngs:          Memory ranges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  * Returns the maximum size of @mem_rngs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) static inline size_t get_mem_rngs_size(struct crash_mem *mem_rngs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	if (!mem_rngs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	size = (sizeof(struct crash_mem) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 		(mem_rngs->max_nr_ranges * sizeof(struct crash_mem_range)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	 * Memory is allocated in size multiple of MEM_RANGE_CHUNK_SZ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	 * So, align to get the actual length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	return ALIGN(size, MEM_RANGE_CHUNK_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  * __add_mem_range - add a memory range to memory ranges list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * @mem_ranges:      Range list to add the memory range to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  * @base:            Base address of the range to add.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * @size:            Size of the memory range to add.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  * (Re)allocates memory, if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  * Returns 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) static int __add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	struct crash_mem *mem_rngs = *mem_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	if (!mem_rngs || (mem_rngs->nr_ranges == mem_rngs->max_nr_ranges)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		mem_rngs = realloc_mem_ranges(mem_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		if (!mem_rngs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	mem_rngs->ranges[mem_rngs->nr_ranges].start = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	mem_rngs->ranges[mem_rngs->nr_ranges].end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	pr_debug("Added memory range [%#016llx - %#016llx] at index %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		 base, base + size - 1, mem_rngs->nr_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	mem_rngs->nr_ranges++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  * __merge_memory_ranges - Merges the given memory ranges list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * @mem_rngs:              Range list to merge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  * Assumes a sorted range list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  * Returns nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) static void __merge_memory_ranges(struct crash_mem *mem_rngs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	struct crash_mem_range *ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	int i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	if (!mem_rngs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	ranges = &(mem_rngs->ranges[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	for (i = 1; i < mem_rngs->nr_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		if (ranges[i].start <= (ranges[i-1].end + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 			ranges[idx].end = ranges[i].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 			idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 			if (i == idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 			ranges[idx] = ranges[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	mem_rngs->nr_ranges = idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* cmp_func_t callback to sort ranges with sort() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static int rngcmp(const void *_x, const void *_y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	const struct crash_mem_range *x = _x, *y = _y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	if (x->start > y->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	if (x->start < y->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  * sort_memory_ranges - Sorts the given memory ranges list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  * @mem_rngs:           Range list to sort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)  * @merge:              If true, merge the list after sorting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  * Returns nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) void sort_memory_ranges(struct crash_mem *mem_rngs, bool merge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	if (!mem_rngs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	/* Sort the ranges in-place */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	sort(&(mem_rngs->ranges[0]), mem_rngs->nr_ranges,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	     sizeof(mem_rngs->ranges[0]), rngcmp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	if (merge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		__merge_memory_ranges(mem_rngs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	/* For debugging purpose */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	pr_debug("Memory ranges:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	for (i = 0; i < mem_rngs->nr_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 		pr_debug("\t[%03d][%#016llx - %#016llx]\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 			 mem_rngs->ranges[i].start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 			 mem_rngs->ranges[i].end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * realloc_mem_ranges - reallocate mem_ranges with size incremented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *                      by MEM_RANGE_CHUNK_SZ. Frees up the old memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  *                      if memory allocation fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  * @mem_ranges:         Memory ranges to reallocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  * Returns pointer to reallocated memory on success, NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	struct crash_mem *mem_rngs = *mem_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	unsigned int nr_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	size = get_mem_rngs_size(mem_rngs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	nr_ranges = mem_rngs ? mem_rngs->nr_ranges : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	size += MEM_RANGE_CHUNK_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	mem_rngs = krealloc(*mem_ranges, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	if (!mem_rngs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 		kfree(*mem_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 		*mem_ranges = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	mem_rngs->nr_ranges = nr_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	mem_rngs->max_nr_ranges = get_max_nr_ranges(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	*mem_ranges = mem_rngs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	return mem_rngs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  * add_mem_range - Updates existing memory range, if there is an overlap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  *                 Else, adds a new memory range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  * @mem_ranges:    Range list to add the memory range to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  * @base:          Base address of the range to add.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  * @size:          Size of the memory range to add.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)  * (Re)allocates memory, if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)  * Returns 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	struct crash_mem *mem_rngs = *mem_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	u64 mstart, mend, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	if (!mem_rngs || !(mem_rngs->nr_ranges))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		return __add_mem_range(mem_ranges, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	for (i = 0; i < mem_rngs->nr_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		mstart = mem_rngs->ranges[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 		mend = mem_rngs->ranges[i].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 		if (base < mend && end > mstart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 			if (base < mstart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 				mem_rngs->ranges[i].start = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 			if (end > mend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 				mem_rngs->ranges[i].end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	return __add_mem_range(mem_ranges, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
/**
 * add_tce_mem_ranges - Adds tce-table range to the given memory ranges list.
 * @mem_ranges:         Range list to add the memory range(s) to.
 *
 * Walks every device-tree node of type "pci" and adds the TCE table
 * described by its "linux,tce-base"/"linux,tce-size" properties, when
 * both are present.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_tce_mem_ranges(struct crash_mem **mem_ranges)
{
	struct device_node *dn = NULL;
	int ret = 0;

	for_each_node_by_type(dn, "pci") {
		u64 base;
		u32 size;

		/* ret is the OR of both reads: nonzero if either failed. */
		ret = of_property_read_u64(dn, "linux,tce-base", &base);
		ret |= of_property_read_u32(dn, "linux,tce-size", &size);
		if (ret) {
			/*
			 * It is ok to have pci nodes without tce. So, ignore
			 * property does not exist error.
			 */
			if (ret == -EINVAL) {
				ret = 0;
				continue;
			}
			break;
		}

		ret = add_mem_range(mem_ranges, base, size);
		if (ret)
			break;
	}

	/*
	 * Drop the reference still held on dn if the loop was exited via
	 * break; dn is NULL (a no-op put) when the loop ran to completion.
	 */
	of_node_put(dn);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  * add_initrd_mem_range - Adds initrd range to the given memory ranges list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  *                        if the initrd was retained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  * @mem_ranges:           Range list to add the memory range to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)  * Returns 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) int add_initrd_mem_range(struct crash_mem **mem_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	u64 base, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	/* This range means something, only if initrd was retained */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	if (!strstr(saved_command_line, "retain_initrd"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	ret = of_property_read_u64(of_chosen, "linux,initrd-start", &base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	ret |= of_property_read_u64(of_chosen, "linux,initrd-end", &end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 		ret = add_mem_range(mem_ranges, base, end - base + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
#ifdef CONFIG_PPC_BOOK3S_64
/**
 * add_htab_mem_range - Adds htab range to the given memory ranges list,
 *                      if it exists
 * @mem_ranges:         Range list to add the memory range to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_htab_mem_range(struct crash_mem **mem_ranges)
{
	u64 base;

	/* Nothing to add when no hash page table was set up. */
	if (!htab_address)
		return 0;

	base = __pa(htab_address);
	return add_mem_range(mem_ranges, base, htab_size_bytes);
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)  * add_kernel_mem_range - Adds kernel text region to the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  *                        memory ranges list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  * @mem_ranges:           Range list to add the memory range to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)  * Returns 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) int add_kernel_mem_range(struct crash_mem **mem_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	return add_mem_range(mem_ranges, 0, __pa(_end));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)  * add_rtas_mem_range - Adds RTAS region to the given memory ranges list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)  * @mem_ranges:         Range list to add the memory range to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)  * Returns 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) int add_rtas_mem_range(struct crash_mem **mem_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	struct device_node *dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	u32 base, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	dn = of_find_node_by_path("/rtas");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	if (!dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	ret = of_property_read_u32(dn, "linux,rtas-base", &base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	ret |= of_property_read_u32(dn, "rtas-size", &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 		ret = add_mem_range(mem_ranges, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	of_node_put(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)  * add_opal_mem_range - Adds OPAL region to the given memory ranges list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)  * @mem_ranges:         Range list to add the memory range to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)  * Returns 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) int add_opal_mem_range(struct crash_mem **mem_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	struct device_node *dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	u64 base, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	dn = of_find_node_by_path("/ibm,opal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	if (!dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	ret = of_property_read_u64(dn, "opal-base-address", &base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	ret |= of_property_read_u64(dn, "opal-runtime-size", &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 		ret = add_mem_range(mem_ranges, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	of_node_put(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)  * add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)  *                           to the given memory ranges list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)  * @mem_ranges:              Range list to add the memory ranges to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)  * Returns 0 on success, negative errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	const __be32 *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	prop = of_get_property(of_root, "reserved-ranges", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	if (!prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	n_mem_addr_cells = of_n_addr_cells(of_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	n_mem_size_cells = of_n_size_cells(of_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	cells = n_mem_addr_cells + n_mem_size_cells;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	/* Each reserved range is an (address,size) pair */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	for (i = 0; i < (len / (sizeof(u32) * cells)); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 		u64 base, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 		base = of_read_number(prop + (i * cells), n_mem_addr_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 		size = of_read_number(prop + (i * cells) + n_mem_addr_cells,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 				      n_mem_size_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 		ret = add_mem_range(mem_ranges, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }