Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains Xilinx specific SMP code, used to start up
 * the second processor.
 *
 * Copyright (C) 2011-2013 Xilinx
 *
 * based on linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"

/*
 * Store the number of cores in the system.
 * scu_get_core_count() is in the __init section, so it cannot be called
 * from zynq_cpun_start(), which is not in the __init section.
 */
static int ncores;

int zynq_cpun_start(u32 address, int cpu)
{
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;
	u32 phy_cpuid = cpu_logical_map(cpu);

	/* MS: The SLCR is expected to be directly mapped and accessible */
	/* It is not possible to jump to a non-word-aligned address */
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to ioremap area which points to address 0x0 */
		static u8 __iomem *zero;
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		zynq_slcr_cpu_stop(phy_cpuid);
		if (address) {
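			/*
			 * If physical address 0x0 is not part of the kernel's
			 * linear mapping (__pa(PAGE_OFFSET) != 0), map the
			 * boot vectors explicitly; otherwise they are already
			 * reachable at PAGE_OFFSET.
			 */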
			if (__pa(PAGE_OFFSET)) {
				zero = ioremap(0, trampoline_code_size);
				if (!zero) {
					pr_warn("BOOTUP jump vectors not accessible\n");
					return -1;
				}
			} else {
				zero = (__force u8 __iomem *)PAGE_OFFSET;
			}

			/*
			 * This is an elegant way to jump to any address:
			 * 0x0: Load the address stored at 0x8 into r0
			 * 0x4: Jump via a mov instruction
			 * 0x8: The target address
			 */
			memcpy_toio(zero, &zynq_secondary_trampoline,
							trampoline_size);
			writel(address, zero + trampoline_size);

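			/*
			 * Make sure the trampoline and the jump address are
			 * visible in memory before the secondary CPU is
			 * released from reset.
			 */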
			flush_cache_all();
			outer_flush_range(0, trampoline_code_size);
			smp_wmb();

			if (__pa(PAGE_OFFSET))
				iounmap(zero);
		}
		zynq_slcr_cpu_start(phy_cpuid);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

	return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);

static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
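	/* Hand the secondary CPU the physical address of the common ARM entry point */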
	return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init zynq_smp_init_cpus(void)
{
	int i;

	ncores = scu_get_core_count(zynq_scu_base);

	for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
		set_cpu_possible(i, true);
}

static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
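	/* Enable the Snoop Control Unit so secondary cores join the coherency domain */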
	scu_enable(zynq_scu_base);
}

/**
 * zynq_secondary_init - Initialize secondary CPU cores
 * @cpu:	CPU that is initialized
 *
 * This function is in the hotplug path. Don't move it into the
 * init section!!
 */
static void zynq_secondary_init(unsigned int cpu)
{
	zynq_core_pm_init();
}

#ifdef CONFIG_HOTPLUG_CPU
static int zynq_cpu_kill(unsigned cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

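	/*
	 * Wait for the dying CPU to report, via the SLCR, that it is ready
	 * to be stopped; give up if it has not done so within 50 ms.
	 */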
	while (zynq_slcr_cpu_state_read(cpu))
		if (time_after(jiffies, timeout))
			return 0;

	zynq_slcr_cpu_stop(cpu);
	return 1;
}

/**
 * zynq_cpu_die - Let a CPU core die
 * @cpu:	Dying CPU
 *
 * Platform-specific code to shut down a CPU.
 * Called with IRQs disabled on the dying CPU.
 */
static void zynq_cpu_die(unsigned int cpu)
{
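	/* Mark this CPU as going down so zynq_cpu_kill() can proceed to stop it */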
	zynq_slcr_cpu_state_write(cpu, true);

	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;)
		cpu_do_idle();
}
#endif

const struct smp_operations zynq_smp_ops __initconst = {
	.smp_init_cpus		= zynq_smp_init_cpus,
	.smp_prepare_cpus	= zynq_smp_prepare_cpus,
	.smp_boot_secondary	= zynq_boot_secondary,
	.smp_secondary_init	= zynq_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= zynq_cpu_die,
	.cpu_kill		= zynq_cpu_kill,
#endif
};