Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

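Source listing: arch/powerpc/kernel/setup_64.c, the common boot and setup code for 64-bit PowerPC.
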
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/pgtable.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#include <asm/early_ioremap.h>
#include <asm/pgalloc.h>

#include "setup.h"

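/*
 * Count of secondary CPUs still sitting in the early spin loop
 * (smp_release_cpus() below waits for this to reach zero), and the
 * hash page table size reported by firmware.
 */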
int spinning_secondaries;
u64 ppc64_pft_size;

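/*
 * Conservative defaults (64-byte cache blocks) used until the real
 * geometry is read from the device tree in initialize_cache_info().
 */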
struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

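/* Value of the "smt-enabled=" command line option, if given */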
static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Set up the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		if (!pseries_enable_reloc_on_exc()) {
			init_task.thread.fscr &= ~FSCR_SCV;
			cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
		}

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		unsigned long new_lpcr = lpcr;

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			/* P10 DD1 does not have HAIL */
			if (pvr_version_is(PVR_POWER10) &&
					(mfspr(SPRN_PVR) & 0xf00) == 0x100)
				new_lpcr |= LPCR_AIL_3;
			else
				new_lpcr |= LPCR_HAIL;
		} else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
			new_lpcr |= LPCR_AIL_3;
		}

		if (new_lpcr != lpcr)
			mtspr(SPRN_LPCR, new_lpcr);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

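/* DSCR value left in place by firmware at boot, recorded as the default */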
unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful because, for example,
 * the device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/*
	 * Assume we're on cpu 0 for now.
	 *
	 * We need to load a PACA very early for a few reasons.
	 *
	 * The stack protector canary is stored in the paca, so as soon as we
	 * call any stack protected code we need r13 pointing somewhere valid.
	 *
	 * If we are using kcov it will call in_task() in its instrumentation,
	 * which relies on the current task from the PACA.
	 *
	 * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
	 * printk(), which can trigger both stack protector and kcov.
	 *
	 * percpu variables and spin locks also use the paca.
	 *
	 * So set up a temporary paca. It will be replaced below once we know
	 * what CPU we are on.
	 */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, set up the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	early_ioremap_setup();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above udbg_printf() even).
	 *
	 * Right after we return from this function, we turn on the MMU,
	 * which means the real-mode access trick that btext does will
	 * no longer work; it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

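/* Park this CPU during panic: interrupts hard-disabled, spinning at low priority */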
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
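/* Are secondaries held in the __secondary_hold spin loop and in need of release? */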
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	/*
	 * All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

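/*
 * Fill @info from the raw geometry; @sets == 0 denotes a fully
 * associative cache, reported as assoc = 0xffff.
 */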
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep == NULL)
		lsizep = bsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize   blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			pr_warn("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			pr_warn("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers and so must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

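/*
 * Allocate a THREAD_SIZE stack below @limit, preferring @cpu's NUMA
 * node; panics on failure.
 */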
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

#ifdef CONFIG_PPC_BOOK3E
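/*
 * Allocate the per-CPU critical, debug and machine-check exception
 * stacks used by the BookE exception levels, and point the PACAs at
 * their tops.
 */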
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit, mce_limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Machine check on pseries calls rtas, but can't use the static
	 * rtas_args due to a machine check hitting while the lock is held.
	 * rtas args have to be under 4GB, so the machine check stack is
	 * limited to 4GB so args can be put on stack.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
		mce_limit = SZ_4G;

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774)  * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  * @cpu: cpu to allocate for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * @size: size of the allocation in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  * @align: alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * does the right thing for NUMA regardless of the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  * configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  * Pointer to the allocated area on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 					size_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	int node = early_cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (!node_online(node) || !NODE_DATA(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		ptr = memblock_alloc_from(size, align, goal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		pr_info("cpu %d has no node %d or node-local memory\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			cpu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			 cpu, size, __pa(ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		ptr = memblock_alloc_try_nid(size, align, goal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 					     MEMBLOCK_ALLOC_ACCESSIBLE, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			 cpu, size, node, __pa(ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	return memblock_alloc_from(size, align, goal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
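/*
 * Counterpart to pcpu_alloc_bootmem(): hand an early per-cpu allocation
 * back to memblock.
 */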
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static void __init pcpu_free_bootmem(void *ptr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	memblock_free(__pa(ptr), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
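/*
 * CPU distance callback for the percpu allocator: CPUs on the same NUMA
 * node are local to each other, everything else is considered remote,
 * which lets the allocator group units by node.
 */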
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static int pcpu_cpu_distance(unsigned int from, unsigned int to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (early_cpu_to_node(from) == early_cpu_to_node(to))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		return LOCAL_DISTANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		return REMOTE_DISTANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
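/*
 * Offset of each CPU's per-cpu area from the per-cpu section's link
 * address; per_cpu_ptr() and friends add this to a per-cpu symbol's
 * address to reach the given CPU's copy.
 */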
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) EXPORT_SYMBOL(__per_cpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
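/*
 * Used by the page-mapped percpu first chunk: walk the kernel page
 * tables for @addr (pgd -> p4d -> pud -> pmd) and allocate any levels
 * that are missing, so that a PTE table exists for the percpu mapping.
 */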
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static void __init pcpu_populate_pte(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	pgd_t *pgd = pgd_offset_k(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	p4d = p4d_offset(pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (p4d_none(*p4d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		pud_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		p4d_populate(&init_mm, p4d, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	pud = pud_offset(p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (pud_none(*pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		pmd_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		pud_populate(&init_mm, pud, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	pmd = pmd_offset(pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (!pmd_present(*pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		pte_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		pmd_populate_kernel(&init_mm, pmd, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) err_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	panic("%s: Failed to allocate page table memory for addr %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	      __func__, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) void __init setup_per_cpu_areas(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	size_t atom_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	unsigned long delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	int rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * The linear mapping uses one of 4K, 1M or 16M pages. For 4K, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 * is no need to group units. For the larger mappings, use a 1M atom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	 * which should be large enough to contain a number of units.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (mmu_linear_psize == MMU_PAGE_4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		atom_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		atom_size = 1 << 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 					    pcpu_alloc_bootmem, pcpu_free_bootmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				pcpu_fc_names[pcpu_chosen_fc], rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		rc = pcpu_page_first_chunk(0, pcpu_alloc_bootmem, pcpu_free_bootmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 					   pcpu_populate_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		panic("cannot initialize percpu area (err=%d)", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) }
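
/*
 * Illustration (with a hypothetical variable foo): once the offsets
 * above are set up, per-cpu access on powerpc reduces to pointer
 * arithmetic along these lines,
 *
 *	DEFINE_PER_CPU(int, foo);
 *	int *p = (int *)((unsigned long)&foo + __per_cpu_offset[cpu]);
 *
 * which is roughly what per_cpu_ptr(&foo, cpu) computes. The same
 * offset is mirrored into paca->data_offset so low-level assembly can
 * locate the local CPU's area.
 */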
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
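/*
 * Size of the memory blocks exposed to userspace for memory hotplug
 * (/sys/devices/system/memory/). Platforms may override this via
 * ppc_md.memory_block_size; otherwise fall back to the architecture
 * minimum.
 */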
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) unsigned long memory_block_size_bytes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (ppc_md.memory_block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		return ppc_md.memory_block_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	return MIN_MEMORY_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) #if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) struct ppc_pci_io ppc_pci_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) EXPORT_SYMBOL(ppc_pci_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
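/*
 * Sample period for the perf-based hardlockup detector's cycles event:
 * ppc_proc_freq is in Hz, so this is the number of processor cycles in
 * watchdog_thresh seconds (e.g. ~3e10 cycles for a 3 GHz core with the
 * default 10 second threshold).
 */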
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) u64 hw_nmi_get_sample_period(int watchdog_thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return ppc_proc_freq * watchdog_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * The perf-based hardlockup detector breaks PMU event-based branches, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * disable it by default. Book3S has a soft-NMI hardlockup detector based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * on the decrementer interrupt, so it does not suffer from this problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * It is likely to get false positives in VM guests, so disable it there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * by default too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) static int __init disable_hardlockup_detector(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	hardlockup_detector_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (firmware_has_feature(FW_FEATURE_LPAR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		hardlockup_detector_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) early_initcall(disable_hardlockup_detector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) static enum l1d_flush_type enabled_flush_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) static void *l1d_flush_fallback_area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) static bool no_rfi_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static bool no_entry_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static bool no_uaccess_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) bool rfi_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) bool entry_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) bool uaccess_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) EXPORT_SYMBOL(uaccess_flush_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) static int __init handle_no_rfi_flush(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	pr_info("rfi-flush: disabled on command line.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	no_rfi_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) early_param("no_rfi_flush", handle_no_rfi_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) static int __init handle_no_entry_flush(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	pr_info("entry-flush: disabled on command line.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	no_entry_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) early_param("no_entry_flush", handle_no_entry_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static int __init handle_no_uaccess_flush(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	pr_info("uaccess-flush: disabled on command line.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	no_uaccess_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) early_param("no_uaccess_flush", handle_no_uaccess_flush);
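
/*
 * Each of the above is a boot-time opt-out: passing no_rfi_flush,
 * no_entry_flush or no_uaccess_flush on the kernel command line records
 * the request here, and the corresponding setup_*_flush() below honours
 * it when the platform code decides whether to enable the mitigation.
 */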
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * The RFI flush is not KPTI, but because users will see documentation that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * says to use nopti, we hijack that option here to also disable the RFI flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static int __init handle_no_pti(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	handle_no_rfi_flush(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) early_param("nopti", handle_no_pti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void do_nothing(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 * We don't need to do the flush explicitly; just entering and exiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 * the kernel is sufficient, as the RFI exit handlers do the right thing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) void rfi_flush_enable(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		do_rfi_flush_fixups(enabled_flush_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		on_each_cpu(do_nothing, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		do_rfi_flush_fixups(L1D_FLUSH_NONE);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	rfi_flush = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) void entry_flush_enable(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		do_entry_flush_fixups(enabled_flush_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		on_each_cpu(do_nothing, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		do_entry_flush_fixups(L1D_FLUSH_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	entry_flush = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) void uaccess_flush_enable(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		do_uaccess_flush_fixups(enabled_flush_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		static_branch_enable(&uaccess_flush_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		on_each_cpu(do_nothing, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		static_branch_disable(&uaccess_flush_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	uaccess_flush = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
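
/*
 * All three enable paths above follow the same pattern: patch the flush
 * sequence into the instruction stream via the fixup sites, then run
 * do_nothing() on every CPU so each one performs a kernel entry/exit
 * through the newly patched code before the flag reports it as enabled.
 */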
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void __ref init_fallback_flush(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	u64 l1d_size, limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	/* Only allocate the fallback flush area once (at boot time). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	if (l1d_flush_fallback_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	l1d_size = ppc64_caches.l1d.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	 * If there is no d-cache-size property in the device tree, l1d_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	 * could be zero. That leads to the loop in the asm wrapping around to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	 * 2^64-1, and then walking off the end of the fallback area and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	 * eventually causing a page fault which is fatal. Just default to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	 * something vaguely sane.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (!l1d_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		l1d_size = (64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	limit = min(ppc64_bolted_size(), ppc64_rma_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	 * Align to L1d size, and size it at 2x L1d size, to catch possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	 * hardware prefetch runoff. We don't have a recipe for load patterns to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	 * reliably avoid the prefetcher.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 						l1d_size, MEMBLOCK_LOW_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 						limit, NUMA_NO_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	if (!l1d_flush_fallback_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		      __func__, l1d_size * 2, l1d_size, &limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		struct paca_struct *paca = paca_ptrs[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		paca->l1d_flush_size = l1d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
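
/*
 * The fallback is a displacement flush: the low-level exception-exit
 * handlers (outside this file) read the whole 2 x L1d-sized area via
 * paca->rfi_flush_fallback_area, evicting any potentially sensitive
 * lines from the L1 data cache.
 */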
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) void setup_rfi_flush(enum l1d_flush_type types, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (types & L1D_FLUSH_FALLBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		pr_info("rfi-flush: fallback displacement flush available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		init_fallback_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (types & L1D_FLUSH_ORI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		pr_info("rfi-flush: ori type flush available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	if (types & L1D_FLUSH_MTTRIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		pr_info("rfi-flush: mttrig type flush available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	enabled_flush_types = types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	if (!cpu_mitigations_off() && !no_rfi_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		rfi_flush_enable(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) void setup_entry_flush(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (cpu_mitigations_off())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	if (!no_entry_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		entry_flush_enable(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) void setup_uaccess_flush(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	if (cpu_mitigations_off())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (!no_uaccess_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		uaccess_flush_enable(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) #ifdef CONFIG_DEBUG_FS
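/*
 * The debugfs attributes below accept 0 or 1 to disable or enable each
 * flush at runtime, and report the current state on read.
 */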
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static int rfi_flush_set(void *data, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	bool enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (val == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		enable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	else if (val == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		enable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	/* Only do anything if we're changing state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (enable != rfi_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		rfi_flush_enable(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int rfi_flush_get(void *data, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	*val = rfi_flush ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static int entry_flush_set(void *data, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	bool enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (val == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		enable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	else if (val == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		enable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	/* Only do anything if we're changing state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (enable != entry_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		entry_flush_enable(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static int entry_flush_get(void *data, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	*val = entry_flush ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int uaccess_flush_set(void *data, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	bool enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	if (val == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		enable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	else if (val == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		enable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	/* Only do anything if we're changing state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (enable != uaccess_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		uaccess_flush_enable(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static int uaccess_flush_get(void *data, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	*val = uaccess_flush ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
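/*
 * Assuming debugfs is mounted at /sys/kernel/debug, these knobs appear
 * under /sys/kernel/debug/powerpc/, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/powerpc/rfi_flush
 *
 * disables the RFI flush at runtime.
 */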
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static __init int rfi_flush_debugfs_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) device_initcall(rfi_flush_debugfs_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) #endif /* CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) #endif /* CONFIG_PPC_BOOK3S_64 */