// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
 * dump with assistance from firmware. This approach does not use kexec;
 * instead, firmware assists in booting the kdump kernel while preserving
 * memory contents. Most of the code implementation has been adapted from
 * the phyp-assisted dump implementation written by Linas Vepstas and
 * Manish Ahuja
 *
 * Copyright 2011 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "fadump: " fmt

#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cma.h>
#include <linux/hugetlb.h>

#include <asm/debugfs.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>

/*
 * The CPU that acquired the lock to trigger the fadump crash should
 * wait for other CPUs to enter.
 *
 * The timeout is in milliseconds.
 */
#define CRASH_TIMEOUT		500

static struct fw_dump fw_dump;

static void __init fadump_reserve_crash_area(u64 base);

struct kobject *fadump_kobj;

#ifndef CONFIG_PRESERVE_FA_DUMP

static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);

struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };

#define RESERVED_RNGS_SZ	16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT	(RESERVED_RNGS_SZ / \
				 sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
						   RESERVED_RNGS_SZ, 0,
						   RESERVED_RNGS_CNT, true };

static void __init early_init_dt_scan_reserved_ranges(unsigned long node);

#ifdef CONFIG_CMA
static struct cma *fadump_cma;

/*
 * fadump_cma_init() - Initialize CMA area from a fadump reserved memory
 *
 * This function initializes CMA area from fadump reserved memory.
 * The total size of fadump reserved memory covers boot memory size
 * + cpu data size + hpte size and metadata.
 * Initialize only the area equivalent to boot memory size for CMA use.
 * The remaining portion of fadump reserved memory will not be given to
 * CMA and pages for those will stay reserved. boot memory size is
 * aligned per CMA requirement to satisfy cma_init_reserved_mem() call.
 * Even if that call fails, we still hold the memory reservation and can
 * continue doing fadump.
 */
int __init fadump_cma_init(void)
{
	unsigned long long base, size;
	int rc;

	if (!fw_dump.fadump_enabled)
		return 0;

	/*
	 * Do not use CMA if user has provided fadump=nocma kernel parameter.
	 * Return 1 to continue with fadump old behaviour.
	 */
	if (fw_dump.nocma)
		return 1;

	base = fw_dump.reserve_dump_area_start;
	size = fw_dump.boot_memory_size;

	if (!size)
		return 0;

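	/*
	 * The third argument to cma_init_reserved_mem() is order_per_bit;
	 * 0 tracks the CMA bitmap at single-page granularity.
	 */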
	rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
	if (rc) {
		pr_err("Failed to init cma area for firmware-assisted dump, %d\n", rc);
		/*
		 * Though the CMA init has failed we still have memory
		 * reservation with us. The reserved memory will be
		 * blocked from production system usage. Hence return 1,
		 * so that we can continue with fadump.
		 */
		return 1;
	}

	/*
	 * We have now successfully initialized the cma area for fadump.
	 */
	pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx bytes of memory reserved for firmware-assisted dump\n",
		cma_get_size(fadump_cma),
		(unsigned long)cma_get_base(fadump_cma) >> 20,
		fw_dump.reserve_dump_area_size);
	return 1;
}
#else
static int __init fadump_cma_init(void) { return 1; }
#endif /* CONFIG_CMA */

/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
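	/*
	 * At the root node (depth 0), pick up the reserved ranges recorded
	 * in the device tree before descending to the nodes below.
	 */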
	if (depth == 0) {
		early_init_dt_scan_reserved_ranges(node);
		return 0;
	}

	if (depth != 1)
		return 0;

	if (strcmp(uname, "rtas") == 0) {
		rtas_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	if (strcmp(uname, "ibm,opal") == 0) {
		opal_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	return 0;
}

/*
 * If fadump is registered, check if the memory provided
 * falls within the boot memory area or the reserved memory area.
 */
int is_fadump_memory_area(u64 addr, unsigned long size)
{
	u64 d_start, d_end;

	if (!fw_dump.dump_registered)
		return 0;

	if (!size)
		return 0;

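	/*
	 * Check for overlap with the reserved dump area first; failing
	 * that, fall back to checking against the boot memory area.
	 */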
	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	if (((addr + size) > d_start) && (addr <= d_end))
		return 1;

	return (addr <= fw_dump.boot_mem_top);
}

int should_fadump_crash(void)
{
	if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
		return 0;
	return 1;
}

int is_fadump_active(void)
{
	return fw_dump.dump_active;
}

/*
 * Returns true if there are no holes in the memory area between d_start
 * and d_end, false otherwise.
 */
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
	phys_addr_t reg_start, reg_end;
	bool ret = false;
	u64 i, start, end;

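	/*
	 * Walk the memblock ranges in ascending order, advancing d_start
	 * past each range that covers it. Reaching d_end without hitting
	 * a gap means the area is contiguous.
	 */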
	for_each_mem_range(i, &reg_start, &reg_end) {
		start = max_t(u64, d_start, reg_start);
		end = min_t(u64, d_end, reg_end);
		if (d_start < end) {
			/* Memory hole from d_start to start */
			if (start > d_start)
				break;

			if (end == d_end) {
				ret = true;
				break;
			}

			d_start = end + 1;
		}
	}

	return ret;
}

/*
 * Returns true if there are no holes in the boot memory area,
 * false otherwise.
 */
bool is_fadump_boot_mem_contiguous(void)
{
	unsigned long d_start, d_end;
	bool ret = false;
	int i;

	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		d_start = fw_dump.boot_mem_addr[i];
		d_end = d_start + fw_dump.boot_mem_sz[i];

		ret = is_fadump_mem_area_contiguous(d_start, d_end);
		if (!ret)
			break;
	}

	return ret;
}

/*
 * Returns true if there are no holes in the reserved memory area,
 * false otherwise.
 */
bool is_fadump_reserved_mem_contiguous(void)
{
	u64 d_start, d_end;

	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	return is_fadump_mem_area_contiguous(d_start, d_end);
}

/* Print firmware assisted dump configurations for debugging purpose. */
static void fadump_show_config(void)
{
	int i;

	pr_debug("Support for firmware-assisted dump (fadump): %s\n",
		 (fw_dump.fadump_supported ? "present" : "no support"));

	if (!fw_dump.fadump_supported)
		return;

	pr_debug("Fadump enabled    : %s\n",
		 (fw_dump.fadump_enabled ? "yes" : "no"));
	pr_debug("Dump Active       : %s\n",
		 (fw_dump.dump_active ? "yes" : "no"));
	pr_debug("Dump section sizes:\n");
	pr_debug("    CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
	pr_debug("    HPTE region size   : %lx\n", fw_dump.hpte_region_size);
	pr_debug("    Boot memory size   : %lx\n", fw_dump.boot_memory_size);
	pr_debug("    Boot memory top    : %llx\n", fw_dump.boot_mem_top);
	pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt);
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		pr_debug("[%03d] base = %llx, size = %llx\n", i,
			 fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]);
	}
}

/**
 * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM
 *
 * Function to find the largest memory size we need to reserve during early
 * boot process. This will be the size of the memory that is required for a
 * kernel to boot successfully.
 *
 * This function has been taken from phyp-assisted dump feature implementation.
 *
 * Returns the larger of 256MB or 5% of system RAM, rounded down to a
 * multiple of 256MB.
 *
 * TODO: Come up with a better approach to find out a more accurate memory
 * size that is required for a kernel to boot successfully.
 *
 */
static __init u64 fadump_calculate_reserve_size(void)
{
	u64 base, size, bootmem_min;
	int ret;

	if (fw_dump.reserve_bootvar)
		pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");

	/*
	 * Check if the size is specified through crashkernel= cmdline
	 * option. If yes, then use that but ignore base as fadump reserves
	 * memory at a predefined offset.
	 */
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&size, &base);
	if (ret == 0 && size > 0) {
		unsigned long max_size;

		if (fw_dump.reserve_bootvar)
			pr_info("Using 'crashkernel=' parameter for memory reservation.\n");

		fw_dump.reserve_bootvar = (unsigned long)size;

		/*
		 * Adjust if the boot memory size specified is above
		 * the upper limit.
		 */
		max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
		if (fw_dump.reserve_bootvar > max_size) {
			fw_dump.reserve_bootvar = max_size;
			pr_info("Adjusted boot memory size to %luMB\n",
				(fw_dump.reserve_bootvar >> 20));
		}

		return fw_dump.reserve_bootvar;
	} else if (fw_dump.reserve_bootvar) {
		/*
		 * 'fadump_reserve_mem=' is being used to reserve memory
		 * for firmware-assisted dump.
		 */
		return fw_dump.reserve_bootvar;
	}

	/* divide by 20 to get 5% of value */
	size = memblock_phys_mem_size() / 20;

	/* round it down to a multiple of 256MB */
	size = size & ~0x0FFFFFFFUL;
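	/*
	 * For example, with 64GB of RAM: 64GB / 20 = 3.2GB, which the mask
	 * above rounds down to 3GB (the nearest multiple of 256MB).
	 */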

	/* Truncate to memory_limit. We don't want to over-reserve the memory. */
	if (memory_limit && size > memory_limit)
		size = memory_limit;

	bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
	return (size > bootmem_min ? size : bootmem_min);
}

/*
 * Calculate the total memory size required to be reserved for
 * firmware-assisted dump registration.
 */
static unsigned long get_fadump_area_size(void)
{
	unsigned long size = 0;

	size += fw_dump.cpu_state_data_size;
	size += fw_dump.hpte_region_size;
	size += fw_dump.boot_memory_size;
	size += sizeof(struct fadump_crash_info_header);
	size += sizeof(struct elfhdr); /* ELF core header. */
	size += sizeof(struct elf_phdr); /* placeholder for cpu notes */
	/* Program headers for crash memory regions. */
	size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);

	size = PAGE_ALIGN(size);

	/* This is to hold kernel metadata on platforms that support it */
	size += (fw_dump.ops->fadump_get_metadata_size ?
		 fw_dump.ops->fadump_get_metadata_size() : 0);
	return size;
}

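/*
 * Record one boot memory region in fw_dump. Returns 1 on success and 0
 * once all FADUMP_MAX_MEM_REGS slots are used up.
 */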
static int __init add_boot_mem_region(unsigned long rstart,
				      unsigned long rsize)
{
	int i = fw_dump.boot_mem_regs_cnt++;

	if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) {
		fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS;
		return 0;
	}

	pr_debug("Added boot memory range[%d] [%#016lx-%#016lx)\n",
		 i, rstart, (rstart + rsize));
	fw_dump.boot_mem_addr[i] = rstart;
	fw_dump.boot_mem_sz[i] = rsize;
	return 1;
}

/*
 * Firmware usually has a hard limit on the data it can copy per region.
 * Honour that by splitting a memory range into multiple regions.
 */
static int __init add_boot_mem_regions(unsigned long mstart,
				       unsigned long msize)
{
	unsigned long rstart, rsize, max_size;
	int ret = 1;

	rstart = mstart;
	max_size = fw_dump.max_copy_size ? fw_dump.max_copy_size : msize;
	while (msize) {
		if (msize > max_size)
			rsize = max_size;
		else
			rsize = msize;

		ret = add_boot_mem_region(rstart, rsize);
		if (!ret)
			break;

		msize -= rsize;
		rstart += rsize;
	}

	return ret;
}

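/*
 * Walk system RAM from the bottom, recording the first boot_memory_size
 * bytes as boot memory regions while accounting for holes, so that
 * boot_mem_top reflects the real end address of the boot memory area.
 */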
static int __init fadump_get_boot_mem_regions(void)
{
	unsigned long size, cur_size, hole_size, last_end;
	unsigned long mem_size = fw_dump.boot_memory_size;
	phys_addr_t reg_start, reg_end;
	int ret = 1;
	u64 i;

	fw_dump.boot_mem_regs_cnt = 0;

	last_end = 0;
	hole_size = 0;
	cur_size = 0;
	for_each_mem_range(i, &reg_start, &reg_end) {
		size = reg_end - reg_start;
		hole_size += (reg_start - last_end);

		if ((cur_size + size) >= mem_size) {
			size = (mem_size - cur_size);
			ret = add_boot_mem_regions(reg_start, size);
			break;
		}

		mem_size -= size;
		cur_size += size;
		ret = add_boot_mem_regions(reg_start, size);
		if (!ret)
			break;

		last_end = reg_end;
	}
	fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);

	return ret;
}

/*
 * Returns true if the given range overlaps with the reserved memory
 * ranges starting at idx, and updates idx to the index of the memory
 * range it overlaps with. Returns false otherwise.
 */
static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
	bool ret = false;
	int i;

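	/*
	 * This walk assumes the reserved ranges are kept sorted by base
	 * address (they are sorted and merged when scanned from the device
	 * tree), which is what lets it stop early once a range starts at
	 * or beyond 'end'.
	 */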
	for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
		u64 rbase = reserved_mrange_info.mem_ranges[i].base;
		u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;

		if (end <= rbase)
			break;

		if ((end > rbase) && (base < rend)) {
			*idx = i;
			ret = true;
			break;
		}
	}

	return ret;
}

/*
 * Locate a suitable memory area to reserve memory for FADump. While at it,
 * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
 */
static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
{
	struct fadump_memory_range *mrngs;
	phys_addr_t mstart, mend;
	int idx = 0;
	u64 i, ret = 0;

	mrngs = reserved_mrange_info.mem_ranges;
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&mstart, &mend, NULL) {
		pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
			 i, mstart, mend, base);

		if (mstart > base)
			base = PAGE_ALIGN(mstart);

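		/*
		 * Slide the candidate base past each overlapping reserved
		 * range until the window [base, base + size) is clear or
		 * this free range is exhausted.
		 */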
		while ((mend > base) && ((mend - base) >= size)) {
			if (!overlaps_reserved_ranges(base, base+size, &idx)) {
				ret = base;
				goto out;
			}

			base = mrngs[idx].base + mrngs[idx].size;
			base = PAGE_ALIGN(base);
		}
	}

out:
	return ret;
}

int __init fadump_reserve_mem(void)
{
	u64 base, size, mem_boundary, bootmem_min;
	int ret = 1;

	if (!fw_dump.fadump_enabled)
		return 0;

	if (!fw_dump.fadump_supported) {
		pr_info("Firmware-Assisted Dump is not supported on this hardware\n");
		goto error_out;
	}

	/*
	 * Initialize boot memory size.
	 * If dump is active then we have already calculated the size during
	 * the first kernel.
	 */
	if (!fw_dump.dump_active) {
		fw_dump.boot_memory_size =
			PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
		if (!fw_dump.nocma) {
			fw_dump.boot_memory_size =
				ALIGN(fw_dump.boot_memory_size,
				      FADUMP_CMA_ALIGNMENT);
		}
#endif

		bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
		if (fw_dump.boot_memory_size < bootmem_min) {
			pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n",
			       fw_dump.boot_memory_size, bootmem_min);
			goto error_out;
		}

		if (!fadump_get_boot_mem_regions()) {
			pr_err("Too many holes in boot memory area to enable fadump\n");
			goto error_out;
		}
	}

	/*
	 * Calculate the memory boundary.
	 * If memory_limit is less than actual memory boundary then reserve
	 * the memory for fadump beyond the memory_limit and adjust the
	 * memory_limit accordingly, so that the running kernel can run with
	 * specified memory_limit.
	 */
	if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
		size = get_fadump_area_size();
		if ((memory_limit + size) < memblock_end_of_DRAM())
			memory_limit += size;
		else
			memory_limit = memblock_end_of_DRAM();
		printk(KERN_INFO "Adjusted memory_limit for firmware-assisted dump, now %#016llx\n",
		       memory_limit);
	}
	if (memory_limit)
		mem_boundary = memory_limit;
	else
		mem_boundary = memblock_end_of_DRAM();

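	/* Start looking for a reservation right above the boot memory area. */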
	base = fw_dump.boot_mem_top;
	size = get_fadump_area_size();
	fw_dump.reserve_dump_area_size = size;
	if (fw_dump.dump_active) {
		pr_info("Firmware-assisted dump is active.\n");

#ifdef CONFIG_HUGETLB_PAGE
		/*
		 * FADump capture kernel doesn't care much about hugepages.
		 * In fact, handling hugepages in capture kernel is asking for
		 * trouble. So, disable HugeTLB support when fadump is active.
		 */
		hugetlb_disabled = true;
#endif
		/*
		 * If the last boot has crashed then reserve all the memory
		 * above boot memory size so that we don't touch it until
		 * dump is written to disk by userspace tool. This memory
		 * can be released for general use by invalidating fadump.
		 */
		fadump_reserve_crash_area(base);

		pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr);
		pr_debug("Reserve dump area start address: 0x%lx\n",
			 fw_dump.reserve_dump_area_start);
	} else {
		/*
		 * Reserve memory at an offset closer to the bottom of RAM to
		 * minimize the impact of memory hot-remove operation.
		 */
		base = fadump_locate_reserve_mem(base, size);

		if (!base || (base + size > mem_boundary)) {
			pr_err("Failed to find memory chunk for reservation!\n");
			goto error_out;
		}
		fw_dump.reserve_dump_area_start = base;

		/*
		 * Calculate the kernel metadata address and register it with
		 * f/w if the platform supports it.
		 */
		if (fw_dump.ops->fadump_setup_metadata &&
		    (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
			goto error_out;

		if (memblock_reserve(base, size)) {
			pr_err("Failed to reserve memory!\n");
			goto error_out;
		}

		pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
			(size >> 20), base, (memblock_phys_mem_size() >> 20));

		ret = fadump_cma_init();
	}

	return ret;
error_out:
	fw_dump.fadump_enabled = 0;
	return 0;
}

/* Look for fadump= cmdline option. */
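/* Accepted values: fadump=on, fadump=off and fadump=nocma. */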
static int __init early_fadump_param(char *p)
{
	if (!p)
		return 1;

	if (strncmp(p, "on", 2) == 0)
		fw_dump.fadump_enabled = 1;
	else if (strncmp(p, "off", 3) == 0)
		fw_dump.fadump_enabled = 0;
	else if (strncmp(p, "nocma", 5) == 0) {
		fw_dump.fadump_enabled = 1;
		fw_dump.nocma = 1;
	}

	return 0;
}
early_param("fadump", early_fadump_param);

/*
 * Look for fadump_reserve_mem= cmdline option.
 * TODO: Remove references to the 'fadump_reserve_mem=' parameter once
 * the 'crashkernel=' parameter is fully adopted in its place.
 */
static int __init early_fadump_reserve_mem(char *p)
{
	if (p)
		fw_dump.reserve_bootvar = memparse(p, &p);
	return 0;
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);

void crash_fadump(struct pt_regs *regs, const char *str)
{
	unsigned int msecs;
	struct fadump_crash_info_header *fdh = NULL;
	int old_cpu, this_cpu;
	/* Do not include first CPU */
	unsigned int ncpus = num_online_cpus() - 1;

	if (!should_fadump_crash())
		return;

	/*
	 * old_cpu == -1 means this is the first CPU which has come here,
	 * go ahead and trigger fadump.
	 *
	 * old_cpu != -1 means some other CPU is already on its way
	 * to trigger fadump, just keep looping here.
	 */
	this_cpu = smp_processor_id();
	old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);

	if (old_cpu != -1) {
		atomic_inc(&cpus_in_fadump);

		/*
		 * We can't loop here indefinitely. Wait as long as fadump
		 * is in force. If we race with fadump un-registration this
		 * loop will break and then we fall back to the normal panic
		 * path and reboot. If fadump is in force the first crashing
		 * CPU will definitely trigger fadump.
		 */
		while (fw_dump.dump_registered)
			cpu_relax();
		return;
	}

	fdh = __va(fw_dump.fadumphdr_addr);
	fdh->crashing_cpu = crashing_cpu;
	crash_save_vmcoreinfo();

	if (regs)
		fdh->regs = *regs;
	else
		ppc_save_regs(&fdh->regs);

	fdh->online_mask = *cpu_online_mask;

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
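	/* On powerpc, exception vector 0x100 is the system reset vector. */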
	if (TRAP(&(fdh->regs)) == 0x100) {
		msecs = CRASH_TIMEOUT;
		while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
			mdelay(1);
	}

	fw_dump.ops->fadump_trigger(fdh, str);
}

u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
	struct elf_prstatus prstatus;

	memset(&prstatus, 0, sizeof(prstatus));
	/*
	 * FIXME: How do I get the PID? Do I really need it?
	 * prstatus.pr_pid = ????
	 */
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	return buf;
}

void fadump_update_elfcore_header(char *bufp)
{
	struct elf_phdr *phdr;

	bufp += sizeof(struct elfhdr);

	/* First note is a placeholder for cpu notes info. */
	phdr = (struct elf_phdr *)bufp;

	if (phdr->p_type == PT_NOTE) {
		phdr->p_paddr = __pa(fw_dump.cpu_notes_buf_vaddr);
		phdr->p_offset = phdr->p_paddr;
		phdr->p_filesz = fw_dump.cpu_notes_buf_size;
		phdr->p_memsz = fw_dump.cpu_notes_buf_size;
	}
}

static void *fadump_alloc_buffer(unsigned long size)
{
	unsigned long count, i;
	struct page *page;
	void *vaddr;

	vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!vaddr)
		return NULL;

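	/*
	 * Mark each backing page reserved so that the rest of the kernel
	 * treats this buffer as untouchable kernel memory.
	 */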
	count = PAGE_ALIGN(size) / PAGE_SIZE;
	page = virt_to_page(vaddr);
	for (i = 0; i < count; i++)
		mark_page_reserved(page + i);
	return vaddr;
}

static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
{
	free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
}

s32 fadump_setup_cpu_notes_buf(u32 num_cpus)
{
	/* Allocate buffer to hold cpu crash notes. */
	fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
	fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
	fw_dump.cpu_notes_buf_vaddr =
		(unsigned long)fadump_alloc_buffer(fw_dump.cpu_notes_buf_size);
	if (!fw_dump.cpu_notes_buf_vaddr) {
		pr_err("Failed to allocate %ld bytes for CPU notes buffer\n",
		       fw_dump.cpu_notes_buf_size);
		return -ENOMEM;
	}

	pr_debug("Allocated buffer for cpu notes of size %ld at 0x%lx\n",
		 fw_dump.cpu_notes_buf_size,
		 fw_dump.cpu_notes_buf_vaddr);
	return 0;
}

void fadump_free_cpu_notes_buf(void)
{
	if (!fw_dump.cpu_notes_buf_vaddr)
		return;

	fadump_free_buffer(fw_dump.cpu_notes_buf_vaddr,
			   fw_dump.cpu_notes_buf_size);
	fw_dump.cpu_notes_buf_vaddr = 0;
	fw_dump.cpu_notes_buf_size = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
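/*
 * Reset a memory ranges array: a static array only has its count cleared
 * so it can be reused, while a dynamic array is freed and all bookkeeping
 * fields after the name are zeroed.
 */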
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (mrange_info->is_static) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) mrange_info->mem_range_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) kfree(mrange_info->mem_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * Allocate or reallocate mem_ranges array in incremental units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * of PAGE_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct fadump_memory_range *new_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u64 new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) new_size = mrange_info->mem_ranges_sz + PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pr_debug("Allocating %llu bytes of memory for %s memory ranges\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) new_size, mrange_info->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) new_array = krealloc(mrange_info->mem_ranges, new_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (new_array == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) pr_err("Insufficient memory for setting up %s memory ranges\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) mrange_info->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) fadump_free_mem_ranges(mrange_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) mrange_info->mem_ranges = new_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) mrange_info->mem_ranges_sz = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) mrange_info->max_mem_ranges = (new_size /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) sizeof(struct fadump_memory_range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
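/*
 * Add the range [base, end) to mrange_info, folding it into the previous
 * range when the two are contiguous. For example, adding [0x1000, 0x2000)
 * right after [0x0, 0x1000) grows the existing entry to [0x0, 0x2000)
 * instead of consuming a new slot.
 */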
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) u64 base, u64 end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct fadump_memory_range *mem_ranges = mrange_info->mem_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) bool is_adjacent = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u64 start, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (base == end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /*
	 * Fold adjacent memory ranges to bring down the count of memory
	 * ranges/PT_LOAD segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (mrange_info->mem_range_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if ((start + size) == base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) is_adjacent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (!is_adjacent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* resize the array on reaching the limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (mrange_info->is_static) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) pr_err("Reached array size limit for %s memory ranges\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) mrange_info->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ret = fadump_alloc_mem_ranges(mrange_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* Update to the new resized array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) mem_ranges = mrange_info->mem_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) start = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mem_ranges[mrange_info->mem_range_cnt].base = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) mrange_info->mem_range_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) mem_ranges[mrange_info->mem_range_cnt - 1].size = (end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) pr_debug("%s_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) mrange_info->name, (mrange_info->mem_range_cnt - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) start, end - 1, (end - start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
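/*
 * Add [start, end) to the crash memory ranges, carving out any overlap
 * with the reserved dump area [ra_start, ra_end):
 * - the range envelops the reserved area: add both leftover pieces
 * - the range overlaps only its start or its end: add the piece outside
 * - no overlap: add the range as-is
 */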
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static int fadump_exclude_reserved_area(u64 start, u64 end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) u64 ra_start, ra_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ra_start = fw_dump.reserve_dump_area_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ra_end = ra_start + fw_dump.reserve_dump_area_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if ((ra_start < end) && (ra_end > start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if ((start < ra_start) && (end > ra_end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ret = fadump_add_mem_range(&crash_mrange_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) start, ra_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ret = fadump_add_mem_range(&crash_mrange_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ra_end, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) } else if (start < ra_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ret = fadump_add_mem_range(&crash_mrange_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) start, ra_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) } else if (ra_end < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ret = fadump_add_mem_range(&crash_mrange_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ra_end, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
	} else {
		ret = fadump_add_mem_range(&crash_mrange_info, start, end);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
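/* Fill in an ELF core (ET_CORE) file header with no program headers yet. */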
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static int fadump_init_elfcore_header(char *bufp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct elfhdr *elf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
	elf = (struct elfhdr *)bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) memcpy(elf->e_ident, ELFMAG, SELFMAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) elf->e_ident[EI_CLASS] = ELF_CLASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) elf->e_ident[EI_DATA] = ELF_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) elf->e_ident[EI_VERSION] = EV_CURRENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) elf->e_ident[EI_OSABI] = ELF_OSABI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) elf->e_type = ET_CORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) elf->e_machine = ELF_ARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) elf->e_version = EV_CURRENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) elf->e_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) elf->e_phoff = sizeof(struct elfhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) elf->e_shoff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) #if defined(_CALL_ELF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) elf->e_flags = _CALL_ELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) elf->e_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) elf->e_ehsize = sizeof(struct elfhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) elf->e_phentsize = sizeof(struct elf_phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) elf->e_phnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) elf->e_shentsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) elf->e_shnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) elf->e_shstrndx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * Traverse through memblock structure and setup crash memory ranges. These
 * ranges will be used to create PT_LOAD program headers in the elfcore
 * header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static int fadump_setup_crash_memory_ranges(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) u64 i, start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) pr_debug("Setup crash memory ranges.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) crash_mrange_info.mem_range_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
	 * Boot memory region(s) registered with firmware are moved to a
	 * different location at the time of crash. Create separate program
	 * header(s) for such memory chunk(s) with the correct offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) start = fw_dump.boot_mem_addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) end = start + fw_dump.boot_mem_sz[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ret = fadump_add_mem_range(&crash_mrange_info, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) for_each_mem_range(i, &start, &end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /*
		 * Skip the memory chunk that is already added
		 * (0 through boot_mem_top).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (start < fw_dump.boot_mem_top) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (end > fw_dump.boot_mem_top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) start = fw_dump.boot_mem_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* add this range excluding the reserved dump area. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ret = fadump_exclude_reserved_area(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
/*
 * If the given physical address falls within the boot memory region then
 * return the relocated address that points to the dump region reserved
 * for saving initial boot memory contents. Firmware copies the boot
 * memory regions contiguously (squeezing out the holes between them) to
 * boot_mem_dest_addr, so the relocated address is boot_mem_dest_addr plus
 * the original address minus the total hole size below it.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static inline unsigned long fadump_relocate(unsigned long paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unsigned long raddr, rstart, rend, rlast, hole_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) hole_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) rlast = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) raddr = paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) rstart = fw_dump.boot_mem_addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) rend = rstart + fw_dump.boot_mem_sz[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) hole_size += (rstart - rlast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (paddr >= rstart && paddr < rend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) raddr += fw_dump.boot_mem_dest_addr - hole_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) rlast = rend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) pr_debug("vmcoreinfo: paddr = 0x%lx, raddr = 0x%lx\n", paddr, raddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return raddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
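/*
 * Build the ELF core header at bufp. The resulting layout is:
 * - the ELF file header
 * - a PT_NOTE placeholder for CPU crash notes (patched with the notes
 *   buffer address later)
 * - a PT_NOTE for vmcoreinfo
 * - one PT_LOAD per crash memory range, with p_offset redirected into
 *   the relocated copy for boot memory regions
 */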
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static int fadump_create_elfcore_headers(char *bufp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) unsigned long long raddr, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct elf_phdr *phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct elfhdr *elf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) fadump_init_elfcore_header(bufp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) elf = (struct elfhdr *)bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) bufp += sizeof(struct elfhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /*
	 * Setup ELF PT_NOTE, a placeholder for cpu notes info. The notes info
	 * will be populated during the second kernel boot after crash. Hence
	 * this PT_NOTE will always be the first ELF note.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * NOTE: Any new ELF note addition should be placed after this note.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) phdr = (struct elf_phdr *)bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) bufp += sizeof(struct elf_phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) phdr->p_type = PT_NOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) phdr->p_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) phdr->p_vaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) phdr->p_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) phdr->p_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) phdr->p_paddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) phdr->p_filesz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) phdr->p_memsz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) (elf->e_phnum)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* setup ELF PT_NOTE for vmcoreinfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) phdr = (struct elf_phdr *)bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) bufp += sizeof(struct elf_phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) phdr->p_type = PT_NOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) phdr->p_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) phdr->p_vaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) phdr->p_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) phdr->p_offset = phdr->p_paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /* Increment number of program headers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) (elf->e_phnum)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
	/* Setup PT_LOAD program headers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) raddr = fw_dump.boot_mem_addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) u64 mbase, msize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) mbase = crash_mrange_info.mem_ranges[i].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) msize = crash_mrange_info.mem_ranges[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (!msize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) phdr = (struct elf_phdr *)bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) bufp += sizeof(struct elf_phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) phdr->p_type = PT_LOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) phdr->p_flags = PF_R|PF_W|PF_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) phdr->p_offset = mbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (mbase == raddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * The entire real memory region will be moved by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * firmware to the specified destination_address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Hence set the correct offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) phdr->p_offset = fw_dump.boot_mem_dest_addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (j < (fw_dump.boot_mem_regs_cnt - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) offset += fw_dump.boot_mem_sz[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) raddr = fw_dump.boot_mem_addr[++j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) phdr->p_paddr = mbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) phdr->p_vaddr = (unsigned long)__va(mbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) phdr->p_filesz = msize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) phdr->p_memsz = msize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) phdr->p_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* Increment number of program headers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) (elf->e_phnum)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
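/*
 * Initialize the crash info header at addr and return the address right
 * past it, which is where the ELF core header goes (and what
 * fdh->elfcorehdr_addr points to).
 */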
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static unsigned long init_fadump_header(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct fadump_crash_info_header *fdh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) fdh = __va(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) addr += sizeof(struct fadump_crash_info_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) memset(fdh, 0, sizeof(struct fadump_crash_info_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) fdh->elfcorehdr_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* We will set the crashing cpu id in crash_fadump() during crash. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
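/*
 * Build everything the capture kernel needs - crash memory ranges, the
 * crash info header and the ELF core header - then ask firmware to
 * register for a dump at the next crash.
 */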
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int register_fadump(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /*
	 * If no memory is reserved then we cannot register for firmware-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * assisted dump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (!fw_dump.reserve_dump_area_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = fadump_setup_crash_memory_ranges();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) addr = fw_dump.fadumphdr_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* Initialize fadump crash info header. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) addr = init_fadump_header(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) vaddr = __va(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) pr_debug("Creating ELF core headers at %#016lx\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) fadump_create_elfcore_headers(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* register the future kernel dump with firmware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) pr_debug("Registering for firmware-assisted kernel dump...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return fw_dump.ops->fadump_register(&fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) void fadump_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (!fw_dump.fadump_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Invalidate the registration only if dump is active. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (fw_dump.dump_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) pr_debug("Invalidating firmware-assisted dump registration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) fw_dump.ops->fadump_invalidate(&fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) } else if (fw_dump.dump_registered) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* Un-register Firmware-assisted dump if it was registered. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) fw_dump.ops->fadump_unregister(&fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) fadump_free_mem_ranges(&crash_mrange_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (fw_dump.ops->fadump_cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) fw_dump.ops->fadump_cleanup(&fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
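/*
 * Give the pages in [start_pfn, end_pfn) back to the buddy allocator.
 * Freeing a large reservation can take a while, so reschedule roughly
 * once a second to avoid hogging the CPU.
 */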
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static void fadump_free_reserved_memory(unsigned long start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) unsigned long end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) unsigned long time_limit = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) for (pfn = start_pfn; pfn < end_pfn; pfn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) free_reserved_page(pfn_to_page(pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (time_after(jiffies, time_limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) time_limit = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * Skip memory holes and free memory that was actually reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void fadump_release_reserved_area(u64 start, u64 end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) unsigned long reg_spfn, reg_epfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) u64 tstart, tend, spfn, epfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) spfn = PHYS_PFN(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) epfn = PHYS_PFN(end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
	for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) tstart = max_t(u64, spfn, reg_spfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) tend = min_t(u64, epfn, reg_epfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (tstart < tend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) fadump_free_reserved_memory(tstart, tend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (tend == epfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) spfn = tend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * Sort the mem ranges in-place and merge adjacent ranges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * to minimize the memory ranges count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct fadump_memory_range *mem_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct fadump_memory_range tmp_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) u64 base, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) int i, j, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
	if (!mrange_info->mem_range_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* Sort the memory ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) mem_ranges = mrange_info->mem_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) for (i = 0; i < mrange_info->mem_range_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (mem_ranges[idx].base > mem_ranges[j].base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) idx = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (idx != i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) tmp_range = mem_ranges[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) mem_ranges[idx] = mem_ranges[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) mem_ranges[i] = tmp_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
	/* Merge adjacent ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) for (i = 1; i < mrange_info->mem_range_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) base = mem_ranges[i-1].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) size = mem_ranges[i-1].size;
		if (mem_ranges[i].base == (base + size)) {
			mem_ranges[idx].size += mem_ranges[i].size;
		} else {
			idx++;
			if (i == idx)
				continue;

			mem_ranges[idx] = mem_ranges[i];
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) mrange_info->mem_range_cnt = idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * Scan reserved-ranges to consider them while reserving/releasing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * memory for FADump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) const __be32 *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int len, ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /* reserved-ranges already scanned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (reserved_mrange_info.mem_range_cnt != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (!prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * Each reserved range is an (address,size) pair, 2 cells each,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * totalling 4 cells per range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) u64 base, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) base = of_read_number(prop + (i * 4) + 0, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) size = of_read_number(prop + (i * 4) + 2, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) ret = fadump_add_mem_range(&reserved_mrange_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) base, base + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) pr_warn("some reserved ranges are ignored!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /* Compact reserved ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) sort_and_merge_mem_ranges(&reserved_mrange_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /*
 * Release the memory that was reserved during early boot to preserve the
 * crashed kernel's memory contents except the reserved dump area (permanent
 * reservation) and the reserved ranges used by F/W. The released memory will
 * be available for general use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static void fadump_release_memory(u64 begin, u64 end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) u64 ra_start, ra_end, tstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) ra_start = fw_dump.reserve_dump_area_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ra_end = ra_start + fw_dump.reserve_dump_area_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /*
	 * If the reserved ranges array limit is hit, overwrite the last
	 * reserved memory range with the reserved dump area to ensure it is
	 * excluded from the memory being released (and reused for the next
	 * FADump registration).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (reserved_mrange_info.mem_range_cnt ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) reserved_mrange_info.max_mem_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) reserved_mrange_info.mem_range_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* Get the reserved ranges list in order first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) sort_and_merge_mem_ranges(&reserved_mrange_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /* Exclude reserved ranges and release remaining memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) tstart = begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) ra_start = reserved_mrange_info.mem_ranges[i].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (tstart >= ra_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (tstart < ra_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) fadump_release_reserved_area(tstart, ra_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) tstart = ra_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (tstart < end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) fadump_release_reserved_area(tstart, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
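/*
 * Invalidate the active dump registration, release the memory that
 * preserved the old kernel's image and re-initialize fadump state so a
 * fresh registration can follow.
 */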
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static void fadump_invalidate_release_mem(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) mutex_lock(&fadump_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (!fw_dump.dump_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) mutex_unlock(&fadump_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) fadump_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) mutex_unlock(&fadump_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) fadump_free_cpu_notes_buf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * Setup kernel metadata and initialize the kernel dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * memory structure for FADump re-registration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (fw_dump.ops->fadump_setup_metadata &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) pr_warn("Failed to setup kernel metadata!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) fw_dump.ops->fadump_init_mem_struct(&fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
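/*
 * Writing 1 to /sys/kernel/fadump/release_mem (only valid while a dump
 * is active) tears down /proc/vmcore and releases the preserved memory
 * for general use, e.g.:
 *
 *	echo 1 > /sys/kernel/fadump/release_mem
 */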
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static ssize_t release_mem_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) int input = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (!fw_dump.dump_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (kstrtoint(buf, 0, &input))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (input == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /*
		 * Take away '/proc/vmcore'. We are releasing the dump
		 * memory and it will no longer be valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) #ifdef CONFIG_PROC_VMCORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) vmcore_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) fadump_invalidate_release_mem();
	} else {
		return -EINVAL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
/* Release the reserved memory and disable FADump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static void unregister_fadump(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) fadump_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) fadump_release_memory(fw_dump.reserve_dump_area_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) fw_dump.reserve_dump_area_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) fw_dump.fadump_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) kobject_put(fadump_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static ssize_t enabled_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static ssize_t mem_reserved_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) static ssize_t registered_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return sprintf(buf, "%d\n", fw_dump.dump_registered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
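/*
 * /sys/kernel/fadump/registered control:
 *	echo 0 > /sys/kernel/fadump/registered	- unregister the dump
 *	echo 1 > /sys/kernel/fadump/registered	- (re)register the dump
 * Writes are refused while fadump is disabled or a dump is active.
 */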
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static ssize_t registered_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int input = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (!fw_dump.fadump_enabled || fw_dump.dump_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (kstrtoint(buf, 0, &input))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) mutex_lock(&fadump_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) switch (input) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) case 0:
		if (fw_dump.dump_registered == 0)
			goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* Un-register Firmware-assisted dump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) pr_debug("Un-register firmware-assisted dump\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) fw_dump.ops->fadump_unregister(&fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (fw_dump.dump_registered == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* Un-register Firmware-assisted dump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) fw_dump.ops->fadump_unregister(&fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /* Register Firmware-assisted dump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ret = register_fadump();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) mutex_unlock(&fadump_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return ret < 0 ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int fadump_region_show(struct seq_file *m, void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (!fw_dump.fadump_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) mutex_lock(&fadump_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) fw_dump.ops->fadump_region_show(&fw_dump, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) mutex_unlock(&fadump_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static struct kobj_attribute register_attr = __ATTR_RW(registered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static struct attribute *fadump_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) &enable_attr.attr,
	&register_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) &mem_reserved_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) ATTRIBUTE_GROUPS(fadump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) DEFINE_SHOW_ATTRIBUTE(fadump_region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
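/*
 * Create the fadump interface files: the sysfs attributes under
 * /sys/kernel/fadump/ (plus release_mem when a dump is active), the
 * debugfs "fadump_region" file, and the legacy /sys/kernel/fadump_*
 * symlinks kept for backward compatibility.
 */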
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static void fadump_init_files(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (!fadump_kobj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) pr_err("failed to create fadump kobject\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) debugfs_create_file("fadump_region", 0444, powerpc_debugfs_root, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) &fadump_region_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (fw_dump.dump_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) pr_err("unable to create release_mem sysfs file (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) rc = sysfs_create_groups(fadump_kobj, fadump_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (rc) {
		pr_err("sysfs group creation failed (%d), unregistering FADump\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) unregister_fadump();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
	/*
	 * The FADump sysfs files are moved from kernel_kobj to fadump_kobj;
	 * create symlinks at the old location to maintain backward
	 * compatibility:
	 *
	 * - fadump_enabled -> fadump/enabled
	 * - fadump_registered -> fadump/registered
	 * - fadump_release_mem -> fadump/release_mem
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) "enabled", "fadump_enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (rc) {
		pr_err("unable to create fadump_enabled symlink (%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) "registered",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) "fadump_registered");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (rc) {
		pr_err("unable to create fadump_registered symlink (%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) sysfs_remove_link(kernel_kobj, "fadump_enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (fw_dump.dump_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) fadump_kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) "release_mem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) "fadump_release_mem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (rc)
			pr_err("unable to create fadump_release_mem symlink (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }

/*
 * Prepare for firmware-assisted dump.
 */
int __init setup_fadump(void)
{
	if (!fw_dump.fadump_supported)
		return 0;

	fadump_init_files();
	fadump_show_config();

	if (!fw_dump.fadump_enabled)
		return 1;

	/*
	 * If dump data is available, check whether it is valid and prepare
	 * to save it to disk.
	 */
	if (fw_dump.dump_active) {
		/*
		 * If the dump process fails, invalidate the registration
		 * and release memory before proceeding with re-registration.
		 */
		if (fw_dump.ops->fadump_process(&fw_dump) < 0)
			fadump_invalidate_release_mem();
	}
	/* Initialize the kernel dump memory structure for FADump registration. */
	else if (fw_dump.reserve_dump_area_size)
		fw_dump.ops->fadump_init_mem_struct(&fw_dump);

	/*
	 * In case of panic, fadump is triggered via the ppc_panic_event()
	 * panic notifier. Setting crash_kexec_post_notifiers to 'true' lets
	 * panic() take a crash-friendly path before the panic notifiers are
	 * invoked (see the illustrative sketch after this function).
	 */
	crash_kexec_post_notifiers = true;

	return 1;
}
subsys_initcall(setup_fadump);
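
/*
 * Illustrative sketch (assumption, not part of this file): roughly how
 * a panic notifier such as ppc_panic_event() hands control to FADump.
 * The example_* names and the registration site are hypothetical; the
 * real notifier lives in the powerpc platform setup code.
 */
#if 0
static int example_panic_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	/*
	 * crash_fadump() triggers the firmware-assisted dump if one is
	 * registered; firmware then reboots into the capture kernel.
	 */
	crash_fadump(NULL, ptr);
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_block = {
	.notifier_call = example_panic_event,
};

static int __init example_register_panic_notifier(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_block);
	return 0;
}
#endif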
#else /* !CONFIG_PRESERVE_FA_DUMP */

/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0))
		return 0;

	opal_fadump_dt_scan(&fw_dump, node);
	return 1;
}
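
/*
 * Illustrative note (assumption, not part of this file): early boot
 * drives callbacks like the one above through the flattened device
 * tree walker, e.g.:
 *
 *	of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
 *
 * of_scan_flat_dt() calls the callback for each node and stops as soon
 * as the callback returns non-zero, which is why a successful match of
 * the "ibm,opal" node returns 1 here.
 */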

/*
 * When a dump is active but PRESERVE_FA_DUMP is enabled in the kernel,
 * preserve the crash data. A subsequent memory-preserving kernel boot
 * is expected to process this crash data.
 */
int __init fadump_reserve_mem(void)
{
	if (fw_dump.dump_active) {
		/*
		 * If the last boot crashed, reserve all memory above the
		 * boot memory region to preserve the crash data.
		 */
		pr_info("Preserving crash data for processing in next boot.\n");
		fadump_reserve_crash_area(fw_dump.boot_mem_top);
	} else {
		pr_debug("FADump-aware kernel...\n");
	}

	return 1;
}
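
/*
 * Worked example (illustrative values): with fw_dump.boot_mem_top at
 * 0x10000000 (256 MB), everything from the 256 MB boundary upwards is
 * reserved so the crash data survives, while memory below that line
 * remains usable by this intermediate, memory-preserving kernel.
 */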
#endif /* CONFIG_PRESERVE_FA_DUMP */

/* Preserve everything above the base address */
static void __init fadump_reserve_crash_area(u64 base)
{
	u64 i, mstart, mend, msize;

	for_each_mem_range(i, &mstart, &mend) {
		msize = mend - mstart;

		if ((mstart + msize) < base)
			continue;

		if (mstart < base) {
			msize -= (base - mstart);
			mstart = base;
		}

		pr_info("Reserving %lluMB of memory at %#016llx for preserving crash data\n",
			(msize >> 20), mstart);
		memblock_reserve(mstart, msize);
	}
}
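
/*
 * Worked example (illustrative values): for base = 0x40000000 (1 GB)
 * and a memblock range [0x20000000, 0x80000000), the range straddles
 * the base, so mstart is clamped up to 0x40000000 and the reserved
 * size becomes 0x80000000 - 0x40000000 = 1 GB. Ranges that end below
 * the base are skipped entirely.
 */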

unsigned long __init arch_reserved_kernel_pages(void)
{
	return memblock_reserved_size() / PAGE_SIZE;
}
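
/*
 * Worked example (illustrative values): with 1 GB reserved in memblock
 * and a config using the 64K pages common on powerpc64, this reports
 * 0x40000000 / 0x10000 = 16384 reserved kernel pages. Note the count
 * covers all memblock reservations, not just the FADump area.
 */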