Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *  Copyright (C) 1995  Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * This file contains the setup_arch() code, which handles the architecture-dependent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * parts of early kernel initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/console.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/crash_dump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/dma-map-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/dmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/efi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/init_ohci1394_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/initrd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/iscsi_ibft.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/root_dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/sfi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/hugetlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/tboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/usb/xhci-dbgp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/static_call.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/swiotlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <uapi/linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <xen/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <asm/apic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <asm/numa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <asm/bios_ebda.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <asm/bugs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <asm/efi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <asm/gart.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <asm/hypervisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <asm/io_apic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <asm/kasan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <asm/kaslr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <asm/mce.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <asm/mtrr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <asm/realmode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <asm/olpc_ofw.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <asm/pci-direct.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <asm/proto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <asm/unwind.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <asm/vsyscall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  * max_pfn_mapped:     highest directly mapped pfn > 4 GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and gaps are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57)  * represented by pfn_mapped[].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) unsigned long max_low_pfn_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) unsigned long max_pfn_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #ifdef CONFIG_DMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) RESERVE_BRK(dmi_alloc, 65536);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68)  * Range of the BSS area. The size of the BSS area is determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69)  * at link time, with RESERVE_BRK*() facility reserving additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70)  * chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) unsigned long _brk_start = (unsigned long)__brk_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) unsigned long _brk_end   = (unsigned long)__brk_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) struct boot_params boot_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  * These are the four main kernel memory regions, we put them into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  * the resource tree so that kdump tools and other debugging tools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80)  * recover it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) static struct resource rodata_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	.name	= "Kernel rodata",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	.start	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	.end	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) static struct resource data_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	.name	= "Kernel data",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	.start	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	.end	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) static struct resource code_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	.name	= "Kernel code",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	.start	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	.end	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) static struct resource bss_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	.name	= "Kernel bss",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	.start	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	.end	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) /* CPU data as detected by the assembly code in head_32.S */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) struct cpuinfo_x86 new_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) /* Common CPU data for all CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) struct cpuinfo_x86 boot_cpu_data __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) EXPORT_SYMBOL(boot_cpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) unsigned int def_to_bigsmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) /* For MCA, but anyone else can use it if they want */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) unsigned int machine_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) unsigned int machine_submodel_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) unsigned int BIOS_revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) struct apm_info apm_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) EXPORT_SYMBOL(apm_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) struct ist_info ist_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) EXPORT_SYMBOL(ist_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) struct ist_info ist_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) struct cpuinfo_x86 boot_cpu_data __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) EXPORT_SYMBOL(boot_cpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) __visible unsigned long mmu_cr4_features __ro_after_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) __visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) int bootloader_type, bootloader_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154)  * Setup options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) struct screen_info screen_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) EXPORT_SYMBOL(screen_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) struct edid_info edid_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) EXPORT_SYMBOL_GPL(edid_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) extern int root_mountflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) unsigned long saved_video_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) #define RAMDISK_IMAGE_START_MASK	0x07FF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) #define RAMDISK_PROMPT_FLAG		0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) #define RAMDISK_LOAD_FLAG		0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) static char __initdata command_line[COMMAND_LINE_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) #ifdef CONFIG_CMDLINE_BOOL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) struct edd edd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) #ifdef CONFIG_EDD_MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) EXPORT_SYMBOL(edd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  * copy_edd() - Copy the BIOS EDD information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181)  *              from boot_params into a safe place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) static inline void __init copy_edd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)      memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	    sizeof(edd.mbr_signature));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188)      memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)      edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190)      edd.edd_info_nr = boot_params.eddbuf_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) #else
/* EDD support is not configured: nothing to copy. */
static inline void __init copy_edd(void)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) void * __init extend_brk(size_t size, size_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	size_t mask = align - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	BUG_ON(_brk_start == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	BUG_ON(align & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	_brk_end = (_brk_end + mask) & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	BUG_ON((char *)(_brk_end + size) > __brk_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	ret = (void *)_brk_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	_brk_end += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	memset(ret, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
#ifdef CONFIG_X86_32
/*
 * No-op on 32-bit; the real implementation is presumably only needed
 * on 64-bit and defined elsewhere — confirm in the 64-bit mm code.
 */
static void __init cleanup_highmap(void)
{
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) static void __init reserve_brk(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	if (_brk_end > _brk_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		memblock_reserve(__pa_symbol(_brk_start),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 				 _brk_end - _brk_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	/* Mark brk area as locked down and no longer taking any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	   new allocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	_brk_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) u64 relocated_ramdisk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) #ifdef CONFIG_BLK_DEV_INITRD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) static u64 __init get_ramdisk_image(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	if (ramdisk_image == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 		ramdisk_image = phys_initrd_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	return ramdisk_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) static u64 __init get_ramdisk_size(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	u64 ramdisk_size = boot_params.hdr.ramdisk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	if (ramdisk_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 		ramdisk_size = phys_initrd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	return ramdisk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
/*
 * Copy the bootloader-provided initrd into a freshly allocated area
 * inside the direct mapping (below max_pfn_mapped) and record its new
 * virtual range in initrd_start/initrd_end.  Panics on allocation
 * failure since boot cannot proceed without the initrd.
 */
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);

	/* We need to move the initrd down into directly mapped mem */
	relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
						      PFN_PHYS(max_pfn_mapped));
	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/* Publish the new (virtual) location for the initrd core code. */
	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	/* Source may not be directly mapped; copy via early mappings. */
	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) static void __init early_reserve_initrd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	/* Assume only end is not page aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	u64 ramdisk_image = get_ramdisk_image();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	u64 ramdisk_size  = get_ramdisk_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	if (!boot_params.hdr.type_of_loader ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	    !ramdisk_image || !ramdisk_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		return;		/* No initrd provided by bootloader */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
/*
 * Make the initrd usable by the kernel: if the bootloader placed it
 * inside the already direct-mapped range, just record its virtual
 * addresses; otherwise relocate it into mapped memory and free the
 * original physical range back to memblock.
 */
static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	/* Cleared first so a failed path leaves no stale pointer. */
	initrd_start = 0;

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
			ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	/* The relocated copy is live; release the original range. */
	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) #else
/* CONFIG_BLK_DEV_INITRD disabled: nothing to reserve early. */
static void __init early_reserve_initrd(void)
{
}
/* CONFIG_BLK_DEV_INITRD disabled: no initrd to set up. */
static void __init reserve_initrd(void)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) #endif /* CONFIG_BLK_DEV_INITRD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 
/*
 * Walk the bootloader-provided setup_data singly linked list (physical
 * addresses, starting at boot_params.hdr.setup_data) and dispatch each
 * entry to the handler for its type.  Each node is temporarily mapped
 * with early_memremap() since it lives in physical memory.
 */
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		/* Copy header fields out before unmapping the node. */
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			e820__memory_setup_extended(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			/* Unknown types are silently skipped. */
			break;
		}
		pa_data = pa_next;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
/*
 * Reserve the physical memory occupied by every setup_data entry so it
 * is not handed out by the page allocator.  A SETUP_INDIRECT entry
 * additionally describes a payload elsewhere in memory, which must be
 * reserved as well; reading it requires remapping the whole entry, not
 * just the header.
 */
static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 pa_data, pa_next;
	u32 len;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		if (!data) {
			/* Can't follow the chain any further; give up. */
			pr_warn("setup: failed to memremap setup_data entry\n");
			return;
		}

		/* 'len' tracks how much is currently mapped for unmap. */
		len = sizeof(*data);
		pa_next = data->next;

		/* The entry itself: header plus inline payload. */
		memblock_reserve(pa_data, sizeof(*data) + data->len);

		if (data->type == SETUP_INDIRECT) {
			/* Remap to cover the full entry, not just the header. */
			len += data->len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap(pa_data, len);
			if (!data) {
				pr_warn("setup: failed to memremap indirect setup_data\n");
				return;
			}

			indirect = (struct setup_indirect *)data->data;

			/* Reserve the out-of-line payload it points at. */
			if (indirect->type != SETUP_INDIRECT)
				memblock_reserve(indirect->addr, indirect->len);
		}

		pa_data = pa_next;
		early_memunmap(data, len);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413)  * --------- Crashkernel reservation ------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) #ifdef CONFIG_KEXEC_CORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) /* 16M alignment for crash kernel regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) #define CRASH_ALIGN		SZ_16M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422)  * Keep the crash kernel below this limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424)  * Earlier 32-bits kernels would limit the kernel to the low 512 MB range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425)  * due to mapping restrictions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427)  * 64-bit kdump kernels need to be restricted to be under 64 TB, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428)  * the upper limit of system RAM in 4-level paging mode. Since the kdump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429)  * jump could be from 5-level paging to 4-level paging, the jump will fail if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430)  * the kernel is put above 64 TB, and during the 1st kernel bootup there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431)  * no good way to detect the paging mode of the target kernel which will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432)  * loaded for dumping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) # define CRASH_ADDR_LOW_MAX	SZ_512M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) # define CRASH_ADDR_HIGH_MAX	SZ_512M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) # define CRASH_ADDR_LOW_MAX	SZ_4G
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) # define CRASH_ADDR_HIGH_MAX	SZ_64T
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) static int __init reserve_crashkernel_low(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	unsigned long long base, low_base = 0, low_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	unsigned long low_mem_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	/* crashkernel=Y,low */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		 * two parts from kernel/dma/swiotlb.c:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		 * -swiotlb size: user-specified with swiotlb= or default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		 * to 8M for other buffers that may need to stay low too. Also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		 * make sure we allocate enough extra low memory so that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		 * don't run out of DMA buffers for 32-bit devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		/* passed with crashkernel=0,low ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		if (!low_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	if (!low_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		       (unsigned long)(low_size >> 20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		(unsigned long)(low_size >> 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		(unsigned long)(low_base >> 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		(unsigned long)(low_mem_limit >> 20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	crashk_low_res.start = low_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	crashk_low_res.end   = low_base + low_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	insert_resource(&iomem_resource, &crashk_low_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) static void __init reserve_crashkernel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	unsigned long long crash_size, crash_base, total_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	bool high = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	total_mem = memblock_phys_mem_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	/* crashkernel=XM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	if (ret != 0 || crash_size <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		/* crashkernel=X,high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		ret = parse_crashkernel_high(boot_command_line, total_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 					     &crash_size, &crash_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		if (ret != 0 || crash_size <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		high = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	if (xen_pv_domain()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		pr_info("Ignoring crashkernel for a Xen PV domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	/* 0 means: find the address automatically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	if (!crash_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		 * crashkernel=x,high reserves memory over 4G, also allocates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		 * 256M extra low memory for DMA buffers and swiotlb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		 * But the extra memory is not required for all machines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		 * So try low memory first and fall back to high memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		 * unless "crashkernel=size[KMG],high" is specified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		if (!high)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 			crash_base = memblock_phys_alloc_range(crash_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 						CRASH_ALIGN, CRASH_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 						CRASH_ADDR_LOW_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		if (!crash_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 			crash_base = memblock_phys_alloc_range(crash_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 						CRASH_ALIGN, CRASH_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 						CRASH_ADDR_HIGH_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		if (!crash_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 			pr_info("crashkernel reservation failed - No suitable area found.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		unsigned long long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 						  crash_base + crash_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		if (start != crash_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 			pr_info("crashkernel reservation failed - memory is in use.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		memblock_free(crash_base, crash_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		(unsigned long)(crash_size >> 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		(unsigned long)(crash_base >> 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		(unsigned long)(total_mem >> 20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	crashk_res.start = crash_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	crashk_res.end   = crash_base + crash_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	insert_resource(&iomem_resource, &crashk_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) static void __init reserve_crashkernel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) static struct resource standard_io_resources[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	{ .name = "dma1", .start = 0x00, .end = 0x1f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	{ .name = "pic1", .start = 0x20, .end = 0x21,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	{ .name = "timer0", .start = 0x40, .end = 0x43,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	{ .name = "timer1", .start = 0x50, .end = 0x53,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	{ .name = "keyboard", .start = 0x60, .end = 0x60,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	{ .name = "keyboard", .start = 0x64, .end = 0x64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	{ .name = "fpu", .start = 0xf0, .end = 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) void __init reserve_standard_io_resources(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	/* request I/O space for devices used on all i[345]86 PCs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		request_resource(&ioport_resource, &standard_io_resources[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) static __init void reserve_ibft_region(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	unsigned long addr, size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	addr = find_ibft_region(&size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		memblock_reserve(addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) static bool __init snb_gfx_workaround_needed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	u16 vendor, devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	static const __initconst u16 snb_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		0x0102,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		0x0112,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		0x0122,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		0x0106,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		0x0116,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		0x0126,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		0x010a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	/* Assume no if something weird is going on with PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	if (!early_pci_allowed())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	if (vendor != 0x8086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		if (devid == snb_ids[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  * Sandy Bridge graphics has trouble with certain ranges, exclude
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  * them from allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) static void __init trim_snb_memory(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	static const __initconst unsigned long bad_pages[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		0x20050000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		0x20110000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		0x20130000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		0x20138000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		0x40004000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (!snb_gfx_workaround_needed())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	 * Reserve all memory below the 1 MB mark that has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	 * already been reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	memblock_reserve(0, 1<<20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 			       bad_pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need of reservation on
 * specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) static void __init trim_platform_memory_ranges(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	trim_snb_memory();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) static void __init trim_bios_range(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	 * A special case is the first 4Kb of memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	 * This is a BIOS owned area, not kernel ram, but generally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	 * not listed as such in the E820 table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	 * This typically reserves additional memory (64KiB by default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	 * since some BIOSes are known to corrupt low memory.  See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	 * Kconfig help text for X86_RESERVE_LOW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	 * special case: Some BIOSes report the PC BIOS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	 * area (640Kb -> 1Mb) as RAM even though it is not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	 * take them out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	e820__update_table(e820_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) /* called before trim_bios_range() to spare extra sanitize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) static void __init e820_add_kernel_range(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	u64 start = __pa_symbol(_text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	u64 size = __pa_symbol(_end) - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	 * Complain if .text .data and .bss are not marked as E820_TYPE_RAM and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 * attempt to fix it by adding the range. We may have a confused BIOS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	 * exclude kernel range. If we really are running on top non-RAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	 * we will crash later anyways.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	e820__range_remove(start, size, E820_TYPE_RAM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	e820__range_add(start, size, E820_TYPE_RAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) static int __init parse_reservelow(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	unsigned long long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	size = memparse(p, &p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (size < 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		size = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	if (size > 640*1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		size = 640*1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	reserve_low = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) early_param("reservelow", parse_reservelow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) static void __init trim_low_memory_range(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  * Dump out kernel offset information on panic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	if (kaslr_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			 kaslr_offset(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			 __START_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			 __START_KERNEL_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			 MODULES_VADDR-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		pr_emerg("Kernel Offset: disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * Determine if we were loaded by an EFI loader.  If so, then we have also been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * passed the efi memmap, systab, etc., so we should use these data structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  * for initialization.  Note, the efi init code path is determined by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * global efi_enabled. This allows the same kernel image to be used on existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * systems (with a traditional BIOS) as well as on EFI systems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  * setup_arch - architecture-specific boot-time initializations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788)  * Note: On x86_64, fixmaps are ready for use even before this is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) void __init setup_arch(char **cmdline_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	 * Reserve the memory occupied by the kernel between _text and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	 * __end_of_kernel_reserve symbols. Any kernel sections after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	 * separate memblock_reserve() or they will be discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	memblock_reserve(__pa_symbol(_text),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 * Make sure page 0 is always reserved because on systems with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 * L1TF its contents can be leaked to user processes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	memblock_reserve(0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	early_reserve_initrd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	 * At this point everything still needed from the boot loader
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	 * or BIOS or kernel text should be early reserved or marked not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 * RAM in e820. All other memory is free game.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 * copy kernel address range established so far and switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 * to the proper swapper page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			initial_page_table + KERNEL_PGD_BOUNDARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			KERNEL_PGD_PTRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	load_cr3(swapper_pg_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 * a cr3 based tlb flush, so the following __flush_tlb_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 * will not flush anything because the CPU quirk which clears
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 * load_cr3() above the TLB has been flushed already. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 * quirk is invoked before subsequent calls to __flush_tlb_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	 * so proper operation is guaranteed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	__flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	 * If we have OLPC OFW, we might end up relocating the fixmap due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	 * reserve_top(), so do this before touching the ioremap area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	olpc_ofw_detect();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	idt_setup_early_traps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	early_cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	arch_init_ideal_nops();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	jump_label_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	static_call_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	early_ioremap_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	setup_olpc_ofw_pgd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	screen_info = boot_params.screen_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	edid_info = boot_params.edid_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	apm_info.bios = boot_params.apm_bios_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	ist_info = boot_params.ist_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	saved_video_mode = boot_params.hdr.vid_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	bootloader_type = boot_params.hdr.type_of_loader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if ((bootloader_type >> 4) == 0xe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		bootloader_type &= 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	bootloader_version  = bootloader_type & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) #ifdef CONFIG_BLK_DEV_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) #ifdef CONFIG_EFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		     EFI32_LOADER_SIGNATURE, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		set_bit(EFI_BOOT, &efi.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		     EFI64_LOADER_SIGNATURE, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		set_bit(EFI_BOOT, &efi.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		set_bit(EFI_64BIT, &efi.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	x86_init.oem.arch_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	e820__memory_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	parse_setup_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	copy_edd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (!boot_params.hdr.root_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		root_mountflags &= ~MS_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	init_mm.start_code = (unsigned long) _text;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	init_mm.end_code = (unsigned long) _etext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	init_mm.end_data = (unsigned long) _edata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	init_mm.brk = _brk_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	code_resource.start = __pa_symbol(_text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	code_resource.end = __pa_symbol(_etext)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	rodata_resource.start = __pa_symbol(__start_rodata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	rodata_resource.end = __pa_symbol(__end_rodata)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	data_resource.start = __pa_symbol(_sdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	data_resource.end = __pa_symbol(_edata)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	bss_resource.start = __pa_symbol(__bss_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	bss_resource.end = __pa_symbol(__bss_stop)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) #ifdef CONFIG_CMDLINE_BOOL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) #ifdef CONFIG_CMDLINE_OVERRIDE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (builtin_cmdline[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		/* append boot loader cmdline to builtin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	*cmdline_p = command_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 * x86_configure_nx() is called before parse_early_param() to detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 * whether hardware doesn't support NX (so that the early EHCI debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 * console setup can safely call set_fixmap()). It may then be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 * again from within noexec_setup() during parsing early parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 * to honor the respective command line option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	x86_configure_nx();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	parse_early_param();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	if (efi_enabled(EFI_BOOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		efi_memblock_x86_reserve_range();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 * Memory used by the kernel cannot be hot-removed because Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * cannot migrate the kernel pages. When memory hotplug is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 * enabled, we should prevent memblock from allocating memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 * for the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	 * ACPI SRAT records all hotpluggable memory ranges. But before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	 * SRAT is parsed, we don't know about it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 * The kernel image is loaded into memory at very early time. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * cannot prevent this anyway. So on NUMA system, we set any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * node the kernel resides in as un-hotpluggable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 * Since on modern servers, one node could have double-digit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	 * gigabytes memory, we can assume the memory around the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	 * image is also un-hotpluggable. So before SRAT is parsed, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	 * allocate memory near the kernel image to try the best to keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * the kernel away from hotpluggable memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	if (movable_node_is_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		memblock_set_bottom_up(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	x86_report_nx();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/* after early param, so could get panic from serial */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	memblock_x86_reserve_range_setup_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (acpi_mps_check()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) #ifdef CONFIG_X86_LOCAL_APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		disable_apic = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		setup_clear_cpu_cap(X86_FEATURE_APIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	e820__reserve_setup_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	e820__finish_early_params();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (efi_enabled(EFI_BOOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		efi_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	dmi_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	 * VMware detection requires dmi to be available, so this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	 * needs to be done after dmi_setup(), for the boot CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	init_hypervisor_platform();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	tsc_early_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	x86_init.resources.probe_roms();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/* after parse_early_param, so could debug it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	insert_resource(&iomem_resource, &code_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	insert_resource(&iomem_resource, &rodata_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	insert_resource(&iomem_resource, &data_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	insert_resource(&iomem_resource, &bss_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	e820_add_kernel_range();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	trim_bios_range();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	if (ppro_with_ram_bug()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 				  E820_TYPE_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		e820__update_table(e820_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		printk(KERN_INFO "fixed physical RAM map:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		e820__print_table("bad_ppro");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	early_gart_iommu_check();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * partially used pages are not usable - thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * we are rounding upwards:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	max_pfn = e820__end_of_ram_pfn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	/* update e820 for memory not covered by WB MTRRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	mtrr_bp_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (mtrr_trim_uncached_memory(max_pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		max_pfn = e820__end_of_ram_pfn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	max_possible_pfn = max_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	 * This call is required when the CPU does not support PAT. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	 * mtrr_bp_init() invoked it already via pat_init() the call has no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	 * effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	init_cache_modes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 * Define random base addresses for memory sections after max_pfn is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	 * defined and before each memory section base is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	kernel_randomize_memory();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	/* max_low_pfn get updated here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	find_low_pfn_range();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	check_x2apic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	/* How many end-of-memory variables you have, grandma! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	/* need this before calling reserve_initrd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		max_low_pfn = e820__end_of_low_ram_pfn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		max_low_pfn = max_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	 * Find and reserve possible boot-time SMP configuration:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	find_smp_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	reserve_ibft_region();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	early_alloc_pgt_buf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	 * Need to conclude brk, before e820__memblock_setup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	 *  it could use memblock_find_in_range, could overlap with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	 *  brk area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	reserve_brk();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	cleanup_highmap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	memblock_set_current_limit(ISA_END_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	e820__memblock_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	reserve_bios_regions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	efi_fake_memmap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	efi_find_mirror();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	efi_esrt_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	efi_mokvar_table_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 * The EFI specification says that boot service code won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	 * called after ExitBootServices(). This is, in fact, a lie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	efi_reserve_boot_services();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	/* preallocate 4k for mptable mpc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	e820__memblock_alloc_reserved_mpc_new();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	setup_bios_corruption_check();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			(max_pfn_mapped<<PAGE_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	reserve_real_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	trim_platform_memory_ranges();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	trim_low_memory_range();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	init_mem_mapping();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	idt_setup_early_pf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	 * with the current CR4 value.  This may not be necessary, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	 * auditing all the early-boot CR4 manipulation would be needed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	 * rule it out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	 * Mask off features that don't work outside long mode (just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	 * PCIDE for now).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	memblock_set_current_limit(get_max_mapped());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (init_ohci1394_dma_early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		init_ohci1394_dma_on_all_controllers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	/* Allocate bigger log buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	setup_log_buf(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (efi_enabled(EFI_BOOT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		switch (boot_params.secure_boot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		case efi_secureboot_mode_disabled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			pr_info("Secure boot disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		case efi_secureboot_mode_enabled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			pr_info("Secure boot enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			pr_info("Secure boot could not be determined\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	reserve_initrd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	acpi_table_upgrade();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	/* Look for ACPI tables and reserve memory occupied by them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	acpi_boot_table_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	vsmp_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	io_delay_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	early_platform_quirks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	early_acpi_boot_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	initmem_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (boot_cpu_has(X86_FEATURE_GBPAGES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	 * Reserve memory for crash kernel after SRAT is parsed so that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	 * won't consume hotpluggable memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	reserve_crashkernel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	memblock_find_dma_reserve();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (!early_xdbc_setup_hardware())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		early_xdbc_register_console();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	x86_init.paging.pagetable_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	kasan_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	 * Sync back kernel address range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	 * this call?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	sync_initial_page_table();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	tboot_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	map_vsyscall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	generic_apic_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	early_quirks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	 * Read APIC and some other early information from ACPI tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	acpi_boot_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	sfi_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	x86_dtb_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	 * get boot-time SMP configuration:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	get_smp_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 * Systems w/o ACPI and mptables might not have it mapped the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	 * APIC yet, but prefill_possible_map() might need to access it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	init_apic_mappings();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	prefill_possible_map();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	init_cpu_to_node();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	init_gi_nodes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	io_apic_init_mappings();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	x86_init.hyper.guest_late_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	e820__reserve_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	e820__register_nosave_regions(max_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	x86_init.resources.reserve_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	e820__setup_pci_gap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) #ifdef CONFIG_VT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) #if defined(CONFIG_VGA_CONSOLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		conswitchp = &vga_con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	x86_init.oem.banner();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	x86_init.timers.wallclock_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	mcheck_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	register_refined_jiffies(CLOCK_TICK_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) #ifdef CONFIG_EFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (efi_enabled(EFI_BOOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		efi_apply_memmap_quirks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	unwind_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static struct resource video_ram_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	.name	= "Video RAM area",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	.start	= 0xa0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	.end	= 0xbffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) void __init i386_reserve_resources(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	request_resource(&iomem_resource, &video_ram_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	reserve_standard_io_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) #endif /* CONFIG_X86_32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static struct notifier_block kernel_offset_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	.notifier_call = dump_kernel_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static int __init register_kernel_offset_dumper(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	atomic_notifier_chain_register(&panic_notifier_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 					&kernel_offset_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) __initcall(register_kernel_offset_dumper);