Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Chen-Yu Tsai
 *
 * Chen-Yu Tsai <wens@csie.org>
 *
 * arch/arm/mach-sunxi/mc_smp.c
 *
 * Based on Allwinner code, arch/arm/mach-exynos/mcpm-exynos.c, and
 * arch/arm/mach-hisi/platmcpm.c
 * Cluster cache enable trampoline code adapted from MCPM framework
 */

#include <linux/arm-cci.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#define SUNXI_CPUS_PER_CLUSTER		4
#define SUNXI_NR_CLUSTERS		2

#define POLL_USEC	100
#define TIMEOUT_USEC	100000

#define CPUCFG_CX_CTRL_REG0(c)		(0x10 * (c))
#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(n)	BIT(n)
#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL	0xf
#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7	BIT(4)
#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15	BIT(0)
#define CPUCFG_CX_CTRL_REG1(c)		(0x10 * (c) + 0x4)
#define CPUCFG_CX_CTRL_REG1_ACINACTM	BIT(0)
#define CPUCFG_CX_STATUS(c)		(0x30 + 0x4 * (c))
#define CPUCFG_CX_STATUS_STANDBYWFI(n)	BIT(16 + (n))
#define CPUCFG_CX_STATUS_STANDBYWFIL2	BIT(0)
#define CPUCFG_CX_RST_CTRL(c)		(0x80 + 0x4 * (c))
#define CPUCFG_CX_RST_CTRL_DBG_SOC_RST	BIT(24)
#define CPUCFG_CX_RST_CTRL_ETM_RST(n)	BIT(20 + (n))
#define CPUCFG_CX_RST_CTRL_ETM_RST_ALL	(0xf << 20)
#define CPUCFG_CX_RST_CTRL_DBG_RST(n)	BIT(16 + (n))
#define CPUCFG_CX_RST_CTRL_DBG_RST_ALL	(0xf << 16)
#define CPUCFG_CX_RST_CTRL_H_RST	BIT(12)
#define CPUCFG_CX_RST_CTRL_L2_RST	BIT(8)
#define CPUCFG_CX_RST_CTRL_CX_RST(n)	BIT(4 + (n))
#define CPUCFG_CX_RST_CTRL_CORE_RST(n)	BIT(n)
#define CPUCFG_CX_RST_CTRL_CORE_RST_ALL	(0xf << 0)

#define PRCM_CPU_PO_RST_CTRL(c)		(0x4 + 0x4 * (c))
#define PRCM_CPU_PO_RST_CTRL_CORE(n)	BIT(n)
#define PRCM_CPU_PO_RST_CTRL_CORE_ALL	0xf
#define PRCM_PWROFF_GATING_REG(c)	(0x100 + 0x4 * (c))
/* The cluster power-off gating bit differs between the A80 and A83T */
#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I	BIT(0)
#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I	BIT(4)
#define PRCM_PWROFF_GATING_REG_CORE(n)	BIT(n)
#define PRCM_PWR_SWITCH_REG(c, cpu)	(0x140 + 0x10 * (c) + 0x4 * (cpu))
#define PRCM_CPU_SOFT_ENTRY_REG		0x164

/* R_CPUCFG registers, specific to sun8i-a83t */
#define R_CPUCFG_CLUSTER_PO_RST_CTRL(c)	(0x30 + (c) * 0x4)
#define R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(n)	BIT(n)
#define R_CPUCFG_CPU_SOFT_ENTRY_REG		0x01a4

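/*
 * Magic flags written to the start of secure SRAM for CPU0 hotplug
 * (see sunxi_cpu0_hotplug_support_set below). Presumably the boot ROM /
 * resume firmware checks them on CPU0 wake-up to tell a hotplug resume
 * apart from a cold boot before jumping to the soft entry address; the
 * exact consumer is not spelled out in this file.
 */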
#define CPU0_SUPPORT_HOTPLUG_MAGIC0	0xFA50392F
#define CPU0_SUPPORT_HOTPLUG_MAGIC1	0x790DCA3A

static void __iomem *cpucfg_base;
static void __iomem *prcm_base;
static void __iomem *sram_b_smp_base;
static void __iomem *r_cpucfg_base;

extern void sunxi_mc_smp_secondary_startup(void);
extern void sunxi_mc_smp_resume(void);
static bool is_a83t;

static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
{
	struct device_node *node;
	int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
	bool is_compatible;

	node = of_cpu_device_node_get(cpu);

	/* In case of_cpu_device_node_get fails */
	if (!node)
		node = of_get_cpu_node(cpu, NULL);

	if (!node) {
		/*
		 * There's no point in returning an error, since we
		 * would be midway through a core or cluster power sequence.
		 */
		pr_err("%s: Couldn't get CPU cluster %u core %u device node\n",
		       __func__, cluster, core);

		return false;
	}

	is_compatible = of_device_is_compatible(node, "arm,cortex-a15");
	of_node_put(node);
	return is_compatible;
}

static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
				      bool enable)
{
	u32 reg;

	/* control sequence from Allwinner A80 user manual v1.2 PRCM section */
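	/*
	 * Note: the staged 0xff -> 0xfe -> 0xf8 -> 0xf0 -> 0x00 writes below
	 * open the per-core power switch a few transistors at a time,
	 * presumably to limit inrush current while the core rail comes up;
	 * writing 0xff closes the switch again in one step.
	 */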
	reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
	if (enable) {
		if (reg == 0x00) {
			pr_debug("power clamp for cluster %u cpu %u already open\n",
				 cluster, cpu);
			return 0;
		}

		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
	} else {
		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
	}

	return 0;
}

static void sunxi_cpu0_hotplug_support_set(bool enable)
{
	if (enable) {
		writel(CPU0_SUPPORT_HOTPLUG_MAGIC0, sram_b_smp_base);
		writel(CPU0_SUPPORT_HOTPLUG_MAGIC1, sram_b_smp_base + 0x4);
	} else {
		writel(0x0, sram_b_smp_base);
		writel(0x0, sram_b_smp_base + 0x4);
	}
}

static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	u32 reg;

	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	/* Set hotplug support magic flags for cpu0 */
	if (cluster == 0 && cpu == 0)
		sunxi_cpu0_hotplug_support_set(true);

	/* assert processor power-on reset */
	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
	reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

	if (is_a83t) {
		/* assert cpu power-on reset */
		reg  = readl(r_cpucfg_base +
			     R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		reg &= ~(R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu));
		writel(reg, r_cpucfg_base +
		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		udelay(10);
	}

	/* Cortex-A7: hold L1 reset disable signal low */
	if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
		reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
		reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(cpu);
		writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
	}

	/* assert processor related resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST(cpu);

	/*
	 * Allwinner code also asserts resets for NEON on A15. According
	 * to ARM manuals, asserting power-on reset is sufficient.
	 */
	if (!sunxi_core_is_cortex_a15(cpu, cluster))
		reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST(cpu);

	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* open power switch */
	sunxi_cpu_power_switch_set(cpu, cluster, true);

	/*
	 * Handle the A83T bit swap: in the power-off gating register,
	 * the bit for core 0 sits at position 4, so remap it here.
	 */
	if (is_a83t) {
		if (cpu == 0)
			cpu = 4;
	}

	/* clear processor power gate */
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	/* Undo the A83T bit swap before touching the reset registers */
	if (is_a83t) {
		if (cpu == 4)
			cpu = 0;
	}

	/* de-assert processor power-on reset */
	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
	reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

	if (is_a83t) {
		reg  = readl(r_cpucfg_base +
			     R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		reg |= R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu);
		writel(reg, r_cpucfg_base +
		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		udelay(10);
	}

	/* de-assert all processor resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg |= CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
	reg |= CPUCFG_CX_RST_CTRL_CORE_RST(cpu);
	if (!sunxi_core_is_cortex_a15(cpu, cluster))
		reg |= CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
	else
		reg |= CPUCFG_CX_RST_CTRL_CX_RST(cpu); /* NEON */
	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	return 0;
}

static int sunxi_cluster_powerup(unsigned int cluster)
{
	u32 reg;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	/* For A83T, assert cluster cores resets */
	if (is_a83t) {
		reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
		reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;   /* Core Reset    */
		writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
		udelay(10);
	}

	/* assert ACINACTM */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
	reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));

	/* assert cluster processor power-on resets */
	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
	reg &= ~PRCM_CPU_PO_RST_CTRL_CORE_ALL;
	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

	/* assert cluster cores resets */
	if (is_a83t) {
		reg  = readl(r_cpucfg_base +
			     R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;
		writel(reg, r_cpucfg_base +
		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		udelay(10);
	}

	/* assert cluster resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST_ALL;
	reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;

	/*
	 * Allwinner code also asserts resets for NEON on A15. According
	 * to ARM manuals, asserting power-on reset is sufficient.
	 */
	if (!sunxi_core_is_cortex_a15(0, cluster))
		reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST_ALL;

	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* hold L1/L2 reset disable signals low */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
	if (sunxi_core_is_cortex_a15(0, cluster)) {
		/* Cortex-A15: hold L2RSTDISABLE low */
		reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15;
	} else {
		/* Cortex-A7: hold L1RSTDISABLE and L2RSTDISABLE low */
		reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL;
		reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7;
	}
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));

	/* clear cluster power gate */
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	if (is_a83t)
		reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
	else
		reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	/* de-assert cluster resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg |= CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
	reg |= CPUCFG_CX_RST_CTRL_H_RST;
	reg |= CPUCFG_CX_RST_CTRL_L2_RST;
	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* de-assert ACINACTM */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
	reg &= ~CPUCFG_CX_CTRL_REG1_ACINACTM;
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));

	return 0;
}

/*
 * This bit is shared between the initial nocache_trampoline call to
 * enable CCI-400 and proper cluster cache disable before power down.
 */
static void sunxi_cluster_cache_disable_without_axi(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/*
		 * On the Cortex-A15 we need to disable L2 prefetching,
		 * via what appears to be the CP15 L2 prefetch control
		 * register written below, before flushing the cache.
		 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3\n"
		"isb\n"
		"dsb"
		: : "r" (0x400));
	}

	/* Flush all cache levels for this cluster. */
	v7_exit_coherency_flush(all);

	/*
	 * Disable cluster-level coherency by masking
	 * incoming snoops and DVM messages:
	 */
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

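/*
 * Per-core use counts (inferred from the code below): boot_secondary
 * increments an entry, cpu_die decrements it, and boot_lock serializes
 * both paths. A cluster counts as "down" once all of its entries reach
 * zero. first_comer flags the CPU that has to power up its cluster and
 * join it to CCI coherency on its own.
 */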
static int sunxi_mc_smp_cpu_table[SUNXI_NR_CLUSTERS][SUNXI_CPUS_PER_CLUSTER];
int sunxi_mc_smp_first_comer;

static DEFINE_SPINLOCK(boot_lock);

static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster)
{
	int i;

	for (i = 0; i < SUNXI_CPUS_PER_CLUSTER; i++)
		if (sunxi_mc_smp_cpu_table[cluster][i])
			return false;
	return true;
}

static void sunxi_mc_smp_secondary_init(unsigned int cpu)
{
	/* Clear hotplug support magic flags for cpu0 */
	if (cpu == 0)
		sunxi_cpu0_hotplug_support_set(false);
}

static int sunxi_mc_smp_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (!cpucfg_base)
		return -ENODEV;
	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	if (sunxi_mc_smp_cpu_table[cluster][cpu])
		goto out;

	if (sunxi_mc_smp_cluster_is_down(cluster)) {
		sunxi_mc_smp_first_comer = true;
		sunxi_cluster_powerup(cluster);
	} else {
		sunxi_mc_smp_first_comer = false;
	}

	/* This is read by incoming CPUs with their cache and MMU disabled */
	sync_cache_w(&sunxi_mc_smp_first_comer);
	sunxi_cpu_powerup(cpu, cluster);

out:
	sunxi_mc_smp_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void sunxi_cluster_cache_disable(void)
{
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
	u32 reg;

	pr_debug("%s: cluster %u\n", __func__, cluster);

	sunxi_cluster_cache_disable_without_axi();

	/* last man standing, assert ACINACTM */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
	reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
}

static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);

	spin_lock(&boot_lock);
	sunxi_mc_smp_cpu_table[cluster][cpu]--;
	if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		pr_debug("%s: aborting due to a power up request\n",
			 __func__);
		spin_unlock(&boot_lock);
		return;
	} else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n",
		       cluster, cpu);
		BUG();
	}

	last_man = sunxi_mc_smp_cluster_is_down(cluster);
	spin_unlock(&boot_lock);

	gic_cpu_if_down(0);
	if (last_man)
		sunxi_cluster_cache_disable();
	else
		v7_exit_coherency_flush(louis);

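	/*
	 * Park here until cpu_kill() observes our STANDBYWFI bit and
	 * gates the core's power; the loop is never expected to fall
	 * through.
	 */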
	for (;;)
		wfi();
}

static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	u32 reg;
	int gating_bit = cpu;

	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	if (is_a83t && cpu == 0)
		gating_bit = 4;

	/* gate processor power */
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit);
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	/* close power switch */
	sunxi_cpu_power_switch_set(cpu, cluster, false);

	return 0;
}

static int sunxi_cluster_powerdown(unsigned int cluster)
{
	u32 reg;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	/* assert cluster resets or system will hang */
	pr_debug("%s: assert cluster reset\n", __func__);
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;
	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* gate cluster power */
	pr_debug("%s: gate cluster power\n", __func__);
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	if (is_a83t)
		reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
	else
		reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	return 0;
}

static int sunxi_mc_smp_cpu_kill(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	unsigned int tries, count;
	int ret = 0;
	u32 reg;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	/* This should never happen */
	if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
		    cpu >= SUNXI_CPUS_PER_CLUSTER))
		return 0;

	/* wait for CPU core to die and enter WFI */
	count = TIMEOUT_USEC / POLL_USEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		spin_unlock_irq(&boot_lock);
		usleep_range(POLL_USEC / 2, POLL_USEC);
		spin_lock_irq(&boot_lock);

		/*
		 * If the user turns off a bunch of cores at the same
		 * time, the kernel might call cpu_kill before some of
		 * them are ready. This is because boot_lock serializes
		 * both cpu_die and cpu_kill callbacks. Either one could
		 * run first. We should wait for cpu_die to complete.
		 */
		if (sunxi_mc_smp_cpu_table[cluster][cpu])
			continue;

		reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
		if (reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu))
			break;
	}

	if (tries >= count) {
		ret = ETIMEDOUT;
		goto out;
	}

	/* power down CPU core */
	sunxi_cpu_powerdown(cpu, cluster);

	if (!sunxi_mc_smp_cluster_is_down(cluster))
		goto out;

	/* wait for cluster L2 WFI */
	ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
				 reg & CPUCFG_CX_STATUS_STANDBYWFIL2,
				 POLL_USEC, TIMEOUT_USEC);
	if (ret) {
		/*
		 * Ignore a timeout on the cluster. Leaving the cluster on
		 * will not affect system execution, it just uses a bit
		 * more power. Returning an error here would only confuse
		 * the user, as the CPU has already been shut down.
		 */
		ret = 0;
		goto out;
	}

	/* Power down cluster */
	sunxi_cluster_powerdown(cluster);

out:
	spin_unlock_irq(&boot_lock);
	pr_debug("%s: cluster %u cpu %u powerdown: %d\n",
		 __func__, cluster, cpu, ret);
	return !ret;
}

static bool sunxi_mc_smp_cpu_can_disable(unsigned int cpu)
{
	/* CPU0 hotplug not handled for sun8i-a83t */
	if (is_a83t)
		if (cpu == 0)
			return false;
	return true;
}
#endif

static const struct smp_operations sunxi_mc_smp_smp_ops __initconst = {
	.smp_secondary_init	= sunxi_mc_smp_secondary_init,
	.smp_boot_secondary	= sunxi_mc_smp_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= sunxi_mc_smp_cpu_die,
	.cpu_kill		= sunxi_mc_smp_cpu_kill,
	.cpu_can_disable	= sunxi_mc_smp_cpu_can_disable,
#endif
};

static bool __init sunxi_mc_smp_cpu_table_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return false;
	}
	sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
	return true;
}

/*
 * Adapted from arch/arm/common/mcpm_entry.c
 *
 * We need the trampoline code to enable CCI-400 on the first cluster
 */
typedef typeof(cpu_reset) phys_reset_t;

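/*
 * nocache_trampoline() runs via cpu_suspend() after setup_mm_for_reboot()
 * has installed a 1:1 memory map, so cpu_reset() can be called through
 * its physical address and execution survives the MMU being switched
 * off before the jump to the resume stub.
 */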
static int __init nocache_trampoline(unsigned long __unused)
{
	phys_reset_t phys_reset;

	setup_mm_for_reboot();
	sunxi_cluster_cache_disable_without_axi();

	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
	phys_reset(__pa_symbol(sunxi_mc_smp_resume), false);
	BUG();
}

static int __init sunxi_mc_smp_loopback(void)
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	sunxi_mc_smp_first_comer = true;
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend(0, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	sunxi_mc_smp_first_comer = false;

	return ret;
}

/*
 * This holds any device nodes that we requested resources for,
 * so that we may easily release resources in the error path.
 */
struct sunxi_mc_smp_nodes {
	struct device_node *prcm_node;
	struct device_node *cpucfg_node;
	struct device_node *sram_node;
	struct device_node *r_cpucfg_node;
};

/* This structure holds SoC-specific bits tied to an enable-method string. */
struct sunxi_mc_smp_data {
	const char *enable_method;
	int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
	bool is_a83t;
};

static void __init sunxi_mc_smp_put_nodes(struct sunxi_mc_smp_nodes *nodes)
{
	of_node_put(nodes->prcm_node);
	of_node_put(nodes->cpucfg_node);
	of_node_put(nodes->sram_node);
	of_node_put(nodes->r_cpucfg_node);
	memset(nodes, 0, sizeof(*nodes));
}

static int __init sun9i_a80_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
{
	nodes->prcm_node = of_find_compatible_node(NULL, NULL,
						   "allwinner,sun9i-a80-prcm");
	if (!nodes->prcm_node) {
		pr_err("%s: PRCM not available\n", __func__);
		return -ENODEV;
	}

	nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
						     "allwinner,sun9i-a80-cpucfg");
	if (!nodes->cpucfg_node) {
		pr_err("%s: CPUCFG not available\n", __func__);
		return -ENODEV;
	}

	nodes->sram_node = of_find_compatible_node(NULL, NULL,
						   "allwinner,sun9i-a80-smp-sram");
	if (!nodes->sram_node) {
		pr_err("%s: Secure SRAM not available\n", __func__);
		return -ENODEV;
	}

	return 0;
}

static int __init sun8i_a83t_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
{
	nodes->prcm_node = of_find_compatible_node(NULL, NULL,
						   "allwinner,sun8i-a83t-r-ccu");
	if (!nodes->prcm_node) {
		pr_err("%s: PRCM not available\n", __func__);
		return -ENODEV;
	}

	nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
						     "allwinner,sun8i-a83t-cpucfg");
	if (!nodes->cpucfg_node) {
		pr_err("%s: CPUCFG not available\n", __func__);
		return -ENODEV;
	}

	nodes->r_cpucfg_node = of_find_compatible_node(NULL, NULL,
						       "allwinner,sun8i-a83t-r-cpucfg");
	if (!nodes->r_cpucfg_node) {
		pr_err("%s: RCPUCFG not available\n", __func__);
		return -ENODEV;
	}

	return 0;
}

static const struct sunxi_mc_smp_data sunxi_mc_smp_data[] __initconst = {
	{
		.enable_method	= "allwinner,sun9i-a80-smp",
		.get_smp_nodes	= sun9i_a80_get_smp_nodes,
	},
	{
		.enable_method	= "allwinner,sun8i-a83t-smp",
		.get_smp_nodes	= sun8i_a83t_get_smp_nodes,
		.is_a83t	= true,
	},
};

static int __init sunxi_mc_smp_init(void)
{
	struct sunxi_mc_smp_nodes nodes = { 0 };
	struct device_node *node;
	struct resource res;
	void __iomem *addr;
	int i, ret;

	/*
	 * Don't bother checking the "cpus" node, as an enable-method
	 * property in that node is undocumented.
	 */
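	/* The individual CPU nodes carry the property instead; CPU 0 is enough. */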
	node = of_cpu_device_node_get(0);
	if (!node)
		return -ENODEV;

	/*
	 * We can't actually use the enable-method magic in the kernel.
	 * Our loopback / trampoline code uses the CPU suspend framework,
	 * which requires the identity mapping be available. It would not
	 * yet be available if we used the .init_cpus or .prepare_cpus
	 * callbacks in smp_operations, which we would use if we were to
	 * use CPU_METHOD_OF_DECLARE.
	 */
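	/*
	 * of_property_match_string() returns the index of the matching
	 * string (0 here, since "enable-method" holds a single value) or
	 * a negative errno, so ret == 0 means this entry matched.
	 */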
	for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
		ret = of_property_match_string(node, "enable-method",
					       sunxi_mc_smp_data[i].enable_method);
		if (!ret)
			break;
	}

	of_node_put(node);
	if (ret)
		return -ENODEV;

	/*
	 * Only dereference the table once we know the loop matched an
	 * entry; otherwise i == ARRAY_SIZE() and the read is out of bounds.
	 */
	is_a83t = sunxi_mc_smp_data[i].is_a83t;

	if (!sunxi_mc_smp_cpu_table_init())
		return -EINVAL;

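	/*
	 * Coherency between the two clusters is handled by the CCI-400
	 * interconnect, so MCPM-style bring-up cannot work without it.
	 */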
	if (!cci_probed()) {
		pr_err("%s: CCI-400 not available\n", __func__);
		return -ENODEV;
	}

	/* Get needed device tree nodes */
	ret = sunxi_mc_smp_data[i].get_smp_nodes(&nodes);
	if (ret)
		goto err_put_nodes;

	/*
	 * Unfortunately we cannot request the I/O region for the PRCM,
	 * as it is shared with the PRCM clock driver.
	 */
	prcm_base = of_iomap(nodes.prcm_node, 0);
	if (!prcm_base) {
		pr_err("%s: failed to map PRCM registers\n", __func__);
		ret = -ENOMEM;
		goto err_put_nodes;
	}

	cpucfg_base = of_io_request_and_map(nodes.cpucfg_node, 0,
					    "sunxi-mc-smp");
	if (IS_ERR(cpucfg_base)) {
		ret = PTR_ERR(cpucfg_base);
		pr_err("%s: failed to map CPUCFG registers: %d\n",
		       __func__, ret);
		goto err_unmap_prcm;
	}

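	/*
	 * The A83T programs the secondary entry address through R-CPUCFG;
	 * the A80 does it through the PRCM and additionally needs the
	 * secure SRAM block for CPU0 hotplug support.
	 */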
	if (is_a83t) {
		r_cpucfg_base = of_io_request_and_map(nodes.r_cpucfg_node,
						      0, "sunxi-mc-smp");
		if (IS_ERR(r_cpucfg_base)) {
			ret = PTR_ERR(r_cpucfg_base);
			pr_err("%s: failed to map R-CPUCFG registers\n",
			       __func__);
			goto err_unmap_release_cpucfg;
		}
	} else {
		sram_b_smp_base = of_io_request_and_map(nodes.sram_node, 0,
							"sunxi-mc-smp");
		if (IS_ERR(sram_b_smp_base)) {
			ret = PTR_ERR(sram_b_smp_base);
			pr_err("%s: failed to map secure SRAM\n", __func__);
			goto err_unmap_release_cpucfg;
		}
	}

	/* Configure CCI-400 for boot cluster */
	ret = sunxi_mc_smp_loopback();
	if (ret) {
		pr_err("%s: failed to configure boot cluster: %d\n",
		       __func__, ret);
		goto err_unmap_release_sram_rcpucfg;
	}

	/* We don't need the device nodes anymore */
	sunxi_mc_smp_put_nodes(&nodes);

	/* Set the hardware entry point address */
	if (is_a83t)
		addr = r_cpucfg_base + R_CPUCFG_CPU_SOFT_ENTRY_REG;
	else
		addr = prcm_base + PRCM_CPU_SOFT_ENTRY_REG;
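	/*
	 * Secondary cores start with the MMU off, so the register must
	 * hold the physical address of the startup trampoline, hence
	 * __pa_symbol() rather than a kernel virtual address.
	 */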
	writel(__pa_symbol(sunxi_mc_smp_secondary_startup), addr);

	/* Actually enable multi cluster SMP */
	smp_set_ops(&sunxi_mc_smp_smp_ops);

	pr_info("sunxi multi cluster SMP support installed\n");

	return 0;

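	/*
	 * Error unwind: release resources in reverse order of acquisition.
	 * of_io_request_and_map() both requests the memory region and maps
	 * it, so each label below must iounmap() and then release the
	 * region looked up again from the DT node.
	 */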
err_unmap_release_sram_rcpucfg:
	if (is_a83t) {
		iounmap(r_cpucfg_base);
		of_address_to_resource(nodes.r_cpucfg_node, 0, &res);
	} else {
		iounmap(sram_b_smp_base);
		of_address_to_resource(nodes.sram_node, 0, &res);
	}
	release_mem_region(res.start, resource_size(&res));
err_unmap_release_cpucfg:
	iounmap(cpucfg_base);
	of_address_to_resource(nodes.cpucfg_node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_unmap_prcm:
	iounmap(prcm_base);
err_put_nodes:
	sunxi_mc_smp_put_nodes(&nodes);
	return ret;
}
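/*
 * Registered as an early_initcall so this runs from
 * do_pre_smp_initcalls(), i.e. before smp_init() brings up the
 * secondary CPUs, ensuring the SMP ops above are in place in time.
 */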
early_initcall(sunxi_mc_smp_init);