// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

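/* Memory resources registered in /proc/iomem by request_standard_resources(). */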
static int num_standard_resources;
static struct resource *standard_resources;

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

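/*
 * Runs early in start_kernel(), before setup_arch(): record the boot
 * CPU's MPIDR in the logical map and reset its percpu offset.
 */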
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	set_cpu_logical_map(0, mpidr);

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
	 * by using percpu variables too early; for example, lockdep
	 * will access a percpu variable inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through
 *			  shifting and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine
		 * how many bits are required to express the
		 * affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space into a
	 * compact set of indices. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
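	/*
	 * Worked example (illustrative, not part of the original source):
	 * for MPIDRs {0x0, 0x1, 0x100, 0x101} the XOR mask is 0x101, so
	 * bits[0] = bits[1] = 1 and fs[0] = fs[1] = 0, giving
	 * shift_aff[0] = 0 and shift_aff[1] = 8 - 1 = 7. MPIDR 0x101 then
	 * hashes to (0x1 >> 0) | (0x100 >> 7) = 0x3, and the four CPUs
	 * map to the collision-free indices 0..3.
	 */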
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}

static void *early_fdt_ptr __initdata;

void __init *get_early_fdt_ptr(void)
{
	return early_fdt_ptr;
}

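/*
 * Called from the early assembly boot path (hence asmlinkage), before
 * the ordinary fixmap and FDT setup in setup_arch() runs.
 */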
asmlinkage void __init early_fdt_map(u64 dt_phys)
{
	int fdt_size;

	early_fixmap_init();
	early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
}

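/*
 * Map the device tree blob, reserve its memory and scan it for early
 * boot information; halt with a loud message if the blob is invalid.
 */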
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	const char *name;

	if (dt_virt)
		memblock_reserve(dt_phys, size);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	/* Early fixups are done, map the FDT as read-only now */
	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}

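/*
 * Publish memblock memory as "System RAM" (or "reserved" for nomap
 * regions) in /proc/iomem, nesting kernel code/data and the crash
 * kernel region inside their parent resources.
 */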
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start = __pa_symbol(_text);
	kernel_code.end = __pa_symbol(__init_begin - 1);
	kernel_data.start = __pa_symbol(_sdata);
	kernel_data.end = __pa_symbol(_end - 1);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}

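/*
 * Once the standard resources exist, mark any memblock-reserved ranges
 * that intersect them as "reserved" in /proc/iomem.
 */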
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

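/* Logical CPU number to hardware MPIDR mapping, INVALID_HWID until set. */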
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

u64 cpu_logical_map(unsigned int cpu)
{
	return __cpu_logical_map[cpu];
}

void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	/*
	 * If we know now that we are going to need KPTI, use non-global
	 * mappings from the start, avoiding the cost of rewriting
	 * everything later.
	 */
	arm64_use_ng_mappings = kaslr_requires_kpti();

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();

	/*
	 * Unmask asynchronous aborts and FIQ after bringing up a possible
	 * earlycon. (Report possible System Errors once we can report that
	 * they occurred.)
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();

	if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
		pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");

	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	init_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after CPUs are set up. */
	kasan_init_sw_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif

	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

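/* A CPU can be hot-unplugged only if its cpu_operations say so. */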
static inline bool cpu_can_disable(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);
#endif
	return false;
}

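/* Register NUMA nodes and CPU devices with the driver core (sysfs). */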
static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);

		cpu->hotpluggable = cpu_can_disable(i);
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static void dump_kernel_offset(void)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
}

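/*
 * Panic notifier: dump the KASLR offset, CPU features and memory limit
 * so they appear in panic output for debugging.
 */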
static int arm64_panic_block_dump(struct notifier_block *self,
				  unsigned long v, void *p)
{
	dump_kernel_offset();
	dump_cpu_features();
	dump_mem_limit();
	return 0;
}

static struct notifier_block arm64_panic_block = {
	.notifier_call = arm64_panic_block_dump
};

static int __init register_arm64_panic_block(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &arm64_panic_block);
	return 0;
}
device_initcall(register_arm64_panic_block);