// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];

unsigned long int_hwcap = 0;

int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(vmalloc_size);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma);
unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through which is safe
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup routine at boot time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

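/*
 * Register the console driver that matches the selected console mode as
 * the preferred console, so that it is picked up once the corresponding
 * driver registers.
 */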
static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

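/*
 * Pick a default console mode if none was specified on the command line:
 * under z/VM the console device and subchannel are queried via CP and the
 * terminal is forced to 3215 conmode, under KVM the SCLP capabilities
 * decide between VT220, line mode and HVC, and on other machines the SCLP
 * console is used if it is configured.
 */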
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
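		/*
		 * The CP response is expected to look roughly like
		 * "CONS xxxx ... SUBCHANNEL = xxxx" (illustrative); the
		 * device number at offset 5 and the subchannel number
		 * after "SUBCHANNEL = " are parsed as hex below.
		 */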
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

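/*
 * When the system was IPLed to create a stand-alone dump and is not
 * running as the kdump kernel, restrict device recognition to the IPL
 * and console devices and lower the console loglevel.
 */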
#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (OLDMEM_BASE)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

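/*
 * Allocate/free a THREAD_SIZE kernel stack: from the vmalloc area when
 * CONFIG_VMAP_STACK is enabled, from the page allocator otherwise.
 */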
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
			THREADINFO_GFP, NUMA_NO_NODE,
			__builtin_return_address(0));
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

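/*
 * Set up the initial async (interrupt) stack for the boot CPU. This runs
 * before the vmalloc area is usable, so the stack is taken from the page
 * allocator; it is replaced later by async_stack_realloc().
 */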
int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

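/*
 * Once the vmalloc area is available, replace the early async stack with
 * one from stack_alloc() (a vmalloc'ed stack if CONFIG_VMAP_STACK is set)
 * and free the pages used by the early stack.
 */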
static int __init async_stack_realloc(void)
{
	unsigned long old, new;

	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
	new = stack_alloc();
	if (!new)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
	free_pages(old, THREAD_SIZE_ORDER);
	return 0;
}
early_initcall(async_stack_realloc);

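/*
 * Allocate the final kernel stack for the init task, make it the current
 * task's stack and continue with rest_init() running on it.
 */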
void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	CALL_ON_STACK_NORETURN(rest_init, stack);
}

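/*
 * Allocate and initialize the lowcore for the boot CPU with interrupt
 * PSWs that run with DAT off (except for KASAN kernels, which need DAT
 * in the interrupt handlers), copy over the state accumulated in the
 * current lowcore, set up the global restart stack and PSW restart data,
 * and switch the prefix register to the new lowcore.
 */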
static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	struct lowcore *lc;

	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	nmi_alloc_boot_cpu(lc);
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

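/*
 * Now that the kernel page tables exist, enable DAT in the external, svc,
 * program check and I/O new PSWs. Low-address protection (control
 * register 0 bit 28 in __ctl_set_bit() numbering) is switched off while
 * the lowcore PSW masks are updated and switched back on afterwards.
 */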
static void __init setup_lowcore_dat_on(void)
{
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_set_bit(0, 28);
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

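/*
 * Register a "System RAM" resource for every memblock memory range and
 * hook the kernel code/data/bss resources in below them, splitting a
 * standard resource when it spans more than one RAM range. The
 * crashkernel region, if any, is re-added as reserved memory and gets
 * its own top-level resource.
 */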
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte
		 * in the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

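/*
 * Lay out the upper part of the kernel address space: decide between a
 * 3-level and a 4-level page table based on how much virtual space the
 * identity mapping, vmemmap, vmalloc and module areas need, place the
 * module and vmalloc areas at the top, split what is left between the
 * identity mapping and the vmemmap array, and clamp memory_end
 * accordingly.
 */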
static void __init setup_memory_end(void)
{
	unsigned long vmax, tmp;

	/* Choose kernel address space layout: 3 or 4 levels. */
	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
	if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
		vmax = _REGION2_SIZE; /* 3-level kernel page table */
	else
		vmax = _REGION1_SIZE; /* 4-level kernel page table */
	if (is_prot_virt_host())
		adjust_to_uv_max(&vmax);
#ifdef CONFIG_KASAN
	vmax = kasan_vmax;
#endif
	/* module area is at the end of the kernel address space. */
	MODULES_END = vmax;
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	tmp = SECTION_ALIGN_UP(tmp);
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
	memory_end = min(memory_end, KASAN_SHADOW_START);
#endif
	vmemmap_size = SECTION_ALIGN_UP(memory_end / PAGE_SIZE) * sizeof(struct page);
#ifdef CONFIG_KASAN
	/* move vmemmap above the kasan shadow only if it stands in the way */
	if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
	    (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
		vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
	memblock_remove(memory_end, ULONG_MAX);

	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area behind memory_end is protected
 */
static void __init reserve_memory_end(void)
{
	if (memory_end_set)
		memblock_reserve(memory_end, ULONG_MAX);
}

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Remove the memory above the running kdump system from the memblock
 * memory list
 */
static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: OLDMEM_BASE;
	high = low + crash_size;
	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb))
		return;

	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!INITRD_START || !INITRD_SIZE)
		return;
	initrd_start = INITRD_START;
	initrd_end = initrd_start + INITRD_SIZE;
	memblock_reserve(INITRD_START, INITRD_SIZE);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

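/*
 * Reserve (and, once it is no longer needed, free again) the extended
 * memory detection information that the early boot code may have stored
 * outside of the mem_detect structure itself.
 */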
static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static void __init memblock_add_mem_detect_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) pr_debug("physmem info source: %s (%hhd)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) get_mem_info_source(), mem_detect.info_source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* keep memblock lists close to the kernel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) memblock_set_bottom_up(true);
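/*
 * memblock_add() makes each range available to the memblock allocator,
 * while memblock_physmem_add() additionally records it in the separate
 * physmem list, which preserves the full physical memory layout even if
 * parts of memblock.memory are trimmed or removed later on.
 */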
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) for_each_mem_detect_block(i, &start, &end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) memblock_add(start, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) memblock_physmem_add(start, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) memblock_set_bottom_up(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) memblock_dump_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * Check that the initrd is located in usable memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) static void __init check_initrd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) #ifdef CONFIG_BLK_DEV_INITRD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (INITRD_START && INITRD_SIZE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) pr_err("The initial RAM disk does not fit into the memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) memblock_free(INITRD_START, INITRD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) initrd_start = initrd_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * Reserve memory used for lowcore/command line/kernel image.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static void __init reserve_kernel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) unsigned long start_pfn = PFN_UP(__pa(_end));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) memblock_reserve(0, HEAD_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) - (unsigned long)_stext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) memblock_reserve(__sdma, __edma - __sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static void __init setup_memory(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) phys_addr_t start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Init storage keys for present memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) for_each_mem_range(i, &start, &end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) storage_key_init_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) psw_set_key(PAGE_DEFAULT_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * Setup hardware capabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static int __init setup_hwcaps(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct cpuid cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * The store-facility-list bits, as found in the Principles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * of Operation, are numbered with bit 1UL<<31 as number 0 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * bit 1UL<<0 as number 31.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Bit 0: instructions named N3, "backported" to esa-mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * Bit 2: z/Architecture mode is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * Bit 7: the store-facility-list-extended facility is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Bit 17: the message-security assist is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * Bit 19: the long-displacement facility is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Bit 21: the extended-immediate facility is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * Bit 22: extended-translation facility 3 is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * Bit 30: extended-translation facility 3 enhancement facility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * These get translated to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * HWCAP_S390_ETF3EH bit 8 (22 && 30).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
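/*
 * For example, facility bit 17 (message-security assist) is stfl_bits[3],
 * so when test_facility(17) succeeds the loop below sets 1UL << 3, which
 * is HWCAP_S390_MSA in the mapping listed above.
 */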
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) for (i = 0; i < ARRAY_SIZE(stfl_bits); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (test_facility(stfl_bits[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) elf_hwcap |= 1UL << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (test_facility(22) && test_facility(30))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) elf_hwcap |= HWCAP_S390_ETF3EH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * Check for additional facilities with store-facility-list-extended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * stfle stores doublewords (8 bytes) with bit 1ULL<<63 as bit 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * as stored by stfl, bits 32-xxx contain additional facilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * How many facility words are stored depends on the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * doublewords passed to the instruction. The additional facilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * Bit 42: decimal floating point facility is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * Bit 44: perform floating point operation facility is installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * translated to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * HWCAP_S390_DFP bit 6 (42 && 44).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) elf_hwcap |= HWCAP_S390_DFP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * Huge page support HWCAP_S390_HPAGE is bit 7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (MACHINE_HAS_EDAT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) elf_hwcap |= HWCAP_S390_HPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * 64-bit register support for 31-bit processes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * HWCAP_S390_HIGH_GPRS is bit 9.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) elf_hwcap |= HWCAP_S390_HIGH_GPRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Transactional execution support HWCAP_S390_TE is bit 10.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (MACHINE_HAS_TE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) elf_hwcap |= HWCAP_S390_TE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * instead of facility bit 129.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (MACHINE_HAS_VX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) elf_hwcap |= HWCAP_S390_VXRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (test_facility(134))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) elf_hwcap |= HWCAP_S390_VXRS_BCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (test_facility(135))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) elf_hwcap |= HWCAP_S390_VXRS_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (test_facility(148))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) elf_hwcap |= HWCAP_S390_VXRS_EXT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (test_facility(152))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) elf_hwcap |= HWCAP_S390_VXRS_PDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (test_facility(150))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) elf_hwcap |= HWCAP_S390_SORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (test_facility(151))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) elf_hwcap |= HWCAP_S390_DFLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Guarded storage support HWCAP_S390_GS is bit 12.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (MACHINE_HAS_GS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) elf_hwcap |= HWCAP_S390_GS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) get_cpu_id(&cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) add_device_randomness(&cpu_id, sizeof(cpu_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) switch (cpu_id.machine) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) case 0x2064:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) case 0x2066:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) default: /* Use "z900" as default for 64-bit kernels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) strcpy(elf_platform, "z900");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) case 0x2084:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) case 0x2086:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) strcpy(elf_platform, "z990");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) case 0x2094:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) case 0x2096:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) strcpy(elf_platform, "z9-109");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case 0x2097:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case 0x2098:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) strcpy(elf_platform, "z10");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) case 0x2817:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) case 0x2818:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) strcpy(elf_platform, "z196");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) case 0x2827:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) case 0x2828:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) strcpy(elf_platform, "zEC12");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) case 0x2964:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case 0x2965:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) strcpy(elf_platform, "z13");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) case 0x3906:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case 0x3907:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) strcpy(elf_platform, "z14");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) case 0x8561:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) case 0x8562:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) strcpy(elf_platform, "z15");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * Virtualization support HWCAP_INT_SIE is bit 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (sclp.has_sief2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int_hwcap |= HWCAP_INT_SIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) arch_initcall(setup_hwcaps);
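/*
 * Illustrative sketch: the elf_hwcap bits collected above are exported to
 * user space through the ELF auxiliary vector, so a program can test for
 * them with getauxval(AT_HWCAP). The 1UL << 11 value below is taken from
 * the "HWCAP_S390_VXRS is bit 11" mapping documented in this function; a
 * real program would use the HWCAP_S390_* constants provided by its libc
 * headers instead of a hard-coded bit number.
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long hwcap = getauxval(AT_HWCAP);
 *
 *		if (hwcap & (1UL << 11))	// HWCAP_S390_VXRS
 *			printf("vector facility available\n");
 *		return 0;
 *	}
 */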
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Add system information as device randomness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void __init setup_randomness(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct sysinfo_3_2_2 *vmms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (!vmms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) panic("Failed to allocate memory for sysinfo structure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) memblock_free((unsigned long) vmms, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * Find the correct size for the task_struct. This depends on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * the size of the struct fpu at the end of the thread_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * which is embedded in the task_struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) */
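/*
 * Rough numbers for illustration, assuming the usual s390 definitions
 * (__NUM_VXRS == 32, __NUM_FPRS == 16, sizeof(__vector128) == 16 and
 * sizeof(freg_t) == 8): without the vector facility the register save
 * area shrinks from 32 * 16 = 512 bytes of vector registers to
 * 16 * 8 = 128 bytes of floating-point registers, so task_struct gets
 * 384 bytes smaller.
 */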
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static void __init setup_task_size(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int task_size = sizeof(struct task_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (!MACHINE_HAS_VX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) task_size -= sizeof(__vector128) * __NUM_VXRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) task_size += sizeof(freg_t) * __NUM_FPRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) arch_task_struct_size = task_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * Issue diagnose 318 to set the control program name and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * version codes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static void __init setup_control_program_code(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) union diag318_info diag318_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .cpnc = CPNC_LINUX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .cpvc = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (!sclp.has_diag318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) diag_stat_inc(DIAG_STAT_X318);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * Print the component list from the IPL report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static void __init log_component_list(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct ipl_rb_component_entry *ptr, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (!early_ipl_comp_list_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) pr_info("Linux is running with Secure-IPL enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pr_info("Linux is running with Secure-IPL disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ptr = (void *) early_ipl_comp_list_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) end = (void *) ptr + early_ipl_comp_list_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) pr_info("The IPL report contains the following components:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) while (ptr < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) str = "signed, verified";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) str = "signed, verification failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) str = "not signed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) pr_info("%016llx - %016llx (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ptr->addr, ptr->addr + ptr->len, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * Setup function called from init/main.c just after the banner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * was printed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) void __init setup_arch(char **cmdline_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * print what head.S has found out about the machine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (MACHINE_IS_VM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) pr_info("Linux is running as a z/VM guest operating system in 64-bit mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) else if (MACHINE_IS_KVM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) pr_info("Linux is running under KVM in 64-bit mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) else if (MACHINE_IS_LPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) pr_info("Linux is running natively in 64-bit mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) pr_info("Linux is running as a guest in 64-bit mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) log_component_list();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /* Have one command line that is parsed and saved in /proc/cmdline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* boot_command_line has already been set up in early.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) *cmdline_p = boot_command_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ROOT_DEV = Root_RAM0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) init_mm.start_code = (unsigned long) _text;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) init_mm.end_code = (unsigned long) _etext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) init_mm.end_data = (unsigned long) _edata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) init_mm.brk = (unsigned long) _end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) nospec_auto_detect();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) jump_label_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) parse_early_param();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) #ifdef CONFIG_CRASH_DUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Deactivate elfcorehdr= kernel parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) elfcorehdr_addr = ELFCORE_ADDR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) os_info_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) setup_ipl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) setup_task_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) setup_control_program_code();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* Do some memory reservations *before* memory is added to memblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) reserve_memory_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) reserve_oldmem();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) reserve_kernel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) reserve_initrd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) reserve_certificate_list();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) reserve_mem_detect_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) memblock_allow_resize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* Get information about *all* installed memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) memblock_add_mem_detect_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) free_mem_detect_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) remove_oldmem();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) setup_uv();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) setup_memory_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) setup_memory();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dma_contiguous_reserve(memory_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) vmcp_cma_reserve();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) check_initrd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) reserve_crashkernel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) #ifdef CONFIG_CRASH_DUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Be aware that smp_save_dump_cpus() triggers a system reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * Therefore CPU and device initialization should be done afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) smp_save_dump_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) setup_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) setup_lowcore_dat_off();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) smp_fill_possible_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) cpu_detect_mhz_feature();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) numa_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) smp_detect_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) topology_init_early();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * Create kernel page tables and switch to virtual addressing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) paging_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * Now that paging_init() has created the kernel page tables, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * new PSWs in lowcore can run with DAT enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) setup_lowcore_dat_on();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* Setup default console */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) conmode_default();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) set_preferred_console();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) apply_alternative_instructions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (IS_ENABLED(CONFIG_EXPOLINE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) nospec_init_branches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /* Setup zfcp/nvme dump support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) setup_zfcpdump();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* Add system specific data to the random pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) setup_randomness();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }