Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (c) 2013 MundoReader S.L.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Author: Heiko Stuebner <heiko@sntech.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/mfd/syscon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/cp15.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/smp_scu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/smp_plat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/mach/map.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include "core.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) static void __iomem *scu_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) static void __iomem *sram_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) static int ncores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #define PMU_PWRDN_CON		0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #define PMU_PWRDN_ST		0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #define PMU_PWRDN_SCU		4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) static struct regmap *pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) static int has_pmu = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) static int pmu_power_domain_is_on(int pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	ret = regmap_read(pmu, PMU_PWRDN_ST, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	return !(val & BIT(pd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) static struct reset_control *rockchip_get_core_reset(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	struct device *dev = get_cpu_device(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	/* The cpu device is only available after the initial core bringup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 		np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 		np = of_get_cpu_node(cpu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	return of_reset_control_get_exclusive(np, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
/*
 * Switch power domain @pd on or off via the PMU, wrapping the transition
 * in a soft reset of the corresponding core.
 *
 * Returns 0 on success or a negative error code.
 */
static int pmu_set_power_domain(int pd, bool on)
{
	/* In PMU_PWRDN_CON a set bit powers the domain DOWN. */
	u32 val = (on) ? 0 : BIT(pd);
	struct reset_control *rstc = rockchip_get_core_reset(pd);
	int ret;

	/*
	 * A missing reset control is only tolerated on Cortex-A9 parts;
	 * on all other SoCs it is treated as fatal.
	 */
	if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
		pr_err("%s: could not get reset control for core %d\n",
		       __func__, pd);
		return PTR_ERR(rstc);
	}

	/*
	 * We need to soft reset the cpu when we turn off the cpu power domain,
	 * or else the active processors might be stalled when the individual
	 * processor is powered down.
	 */
	if (!IS_ERR(rstc) && !on)
		reset_control_assert(rstc);

	if (has_pmu) {
		ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
		if (ret < 0) {
			pr_err("%s: could not update power domain\n",
			       __func__);
			return ret;
		}

		/*
		 * Poll until the status register reflects the requested
		 * state.  pmu_power_domain_is_on() returns 0/1, so comparing
		 * against the bool @on terminates once the state matches.
		 * NOTE(review): no timeout — relies on the PMU eventually
		 * completing the transition.
		 */
		ret = -1;
		while (ret != on) {
			ret = pmu_power_domain_is_on(pd);
			if (ret < 0) {
				pr_err("%s: could not read power domain state\n",
				       __func__);
				return ret;
			}
		}
	}

	/* Release the core from reset only after power-up has completed. */
	if (!IS_ERR(rstc)) {
		if (on)
			reset_control_deassert(rstc);
		reset_control_put(rstc);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)  * Handling of CPU cores
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
/*
 * smp_ops.smp_boot_secondary callback: power up @cpu and, on non-A9
 * parts, release it through the bootrom SRAM mailbox.
 *
 * Returns 0 on success or a negative error code.
 */
static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* Both resources are set up in rockchip_smp_prepare_cpus(). */
	if (!sram_base_addr || (has_pmu && !pmu)) {
		pr_err("%s: sram or pmu missing for cpu boot\n", __func__);
		return -ENXIO;
	}

	/* ncores was read back from the hardware (SCU or L2CTLR). */
	if (cpu >= ncores) {
		pr_err("%s: cpu %d outside maximum number of cpus %d\n",
		       __func__, cpu, ncores);
		return -ENXIO;
	}

	/* start the core */
	ret = pmu_set_power_domain(0 + cpu, true);
	if (ret < 0)
		return ret;

	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
		/*
		 * We communicate with the bootrom to active the cpus other
		 * than cpu0, after a blob of initialize code, they will
		 * stay at wfe state, once they are actived, they will check
		 * the mailbox:
		 * sram_base_addr + 4: 0xdeadbeaf
		 * sram_base_addr + 8: start address for pc
		 * The cpu0 need to wait the other cpus other than cpu0 entering
		 * the wfe state.The wait time is affected by many aspects.
		 * (e.g: cpu frequency, bootrom frequency, sram frequency, ...)
		 */
		mdelay(1); /* ensure the cpus other than cpu0 to startup */

		/*
		 * Write the entry point before the 0xDEADBEAF magic so the
		 * waiting core never sees the magic with a stale address.
		 */
		writel(__pa_symbol(secondary_startup), sram_base_addr + 8);
		writel(0xDEADBEAF, sram_base_addr + 4);
		/* Wake the core parked in wfe. */
		dsb_sev();
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)  * rockchip_smp_prepare_sram - populate necessary sram block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)  * Starting cores execute the code residing at the start of the on-chip sram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)  * after power-on. Therefore make sure, this sram region is reserved and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  * big enough. After this check, copy the trampoline code that directs the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  * core to the real startup code in ram into the sram-region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)  * @node: mmio-sram device node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  */
static int __init rockchip_smp_prepare_sram(struct device_node *node)
{
	/* Trampoline bounds are exported by the assembly in core.h. */
	unsigned int trampoline_sz = &rockchip_secondary_trampoline_end -
					    &rockchip_secondary_trampoline;
	struct resource res;
	unsigned int rsize;
	int ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret < 0) {
		pr_err("%s: could not get address for node %pOF\n",
		       __func__, node);
		return ret;
	}

	/* Refuse to copy if the reserved sram region cannot hold it. */
	rsize = resource_size(&res);
	if (rsize < trampoline_sz) {
		pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n",
		       __func__, rsize, trampoline_sz);
		return -EINVAL;
	}

	/* set the boot function for the sram code */
	rockchip_boot_fn = __pa_symbol(secondary_startup);

	/* copy the trampoline to sram, that runs during startup of the core */
	memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
	/* Push the copy out to the point of coherency for the waking core. */
	flush_cache_all();
	outer_clean_range(0, trampoline_sz);

	dsb_sev();

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
/* Fallback regmap layout for the PMU: plain 32-bit registers, 4-byte stride. */
static const struct regmap_config rockchip_pmu_regmap_config = {
	.name = "rockchip-pmu",
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
/*
 * Locate (or create) the PMU regmap, trying in order:
 *  1. the "rockchip,pmu" phandle on the /cpus node,
 *  2. the "rockchip,rk3066-pmu" syscon,
 *  3. a private MMIO regmap mapped directly from the pmu DT node.
 *
 * On success the file-scope @pmu pointer is valid and 0 is returned;
 * otherwise @pmu is NULL and a negative error code is returned.
 */
static int __init rockchip_smp_prepare_pmu(void)
{
	struct device_node *node;
	void __iomem *pmu_base;

	/*
	 * This function is only called via smp_ops->smp_prepare_cpu().
	 * That only happens if a "/cpus" device tree node exists
	 * and has an "enable-method" property that selects the SMP
	 * operations defined herein.
	 */
	node = of_find_node_by_path("/cpus");

	pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
	of_node_put(node);
	if (!IS_ERR(pmu))
		return 0;

	pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3066-pmu");
	if (!IS_ERR(pmu))
		return 0;

	/* fallback, create our own regmap for the pmu area */
	pmu = NULL;
	node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
	if (!node) {
		pr_err("%s: could not find pmu dt node\n", __func__);
		return -ENODEV;
	}

	pmu_base = of_iomap(node, 0);
	of_node_put(node);
	if (!pmu_base) {
		pr_err("%s: could not map pmu registers\n", __func__);
		return -ENOMEM;
	}

	pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config);
	if (IS_ERR(pmu)) {
		int ret = PTR_ERR(pmu);

		/* Undo the mapping and leave @pmu NULL for the callers. */
		iounmap(pmu_base);
		pmu = NULL;
		pr_err("%s: regmap init failed\n", __func__);
		return ret;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		pr_err("%s: could not find sram dt node\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	sram_base_addr = of_iomap(node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	if (!sram_base_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 		pr_err("%s: could not map sram registers\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 		of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	if (has_pmu && rockchip_smp_prepare_pmu()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 		of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 		if (rockchip_smp_prepare_sram(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 			of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 		/* enable the SCU power domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		pmu_set_power_domain(PMU_PWRDN_SCU, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 		of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 		node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 		if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 			pr_err("%s: missing scu\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		scu_base_addr = of_iomap(node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 		if (!scu_base_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 			pr_err("%s: could not map scu registers\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 			of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		 * While the number of cpus is gathered from dt, also get the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 		 * number of cores from the scu to verify this value when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 		 * booting the cores.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 		ncores = scu_get_core_count(scu_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 		pr_err("%s: ncores %d\n", __func__, ncores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 		scu_enable(scu_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 		unsigned int l2ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 		ncores = ((l2ctlr >> 24) & 0x3) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	/* Make sure that all cores except the first are really off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	for (i = 1; i < ncores; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 		pmu_set_power_domain(0 + i, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 
/*
 * rk3036 variant: this SoC has no PMU-controlled per-core power
 * domains, so clear has_pmu before running the common preparation.
 */
static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus)
{
	has_pmu = false;

	rockchip_smp_prepare_cpus(max_cpus);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) #ifdef CONFIG_HOTPLUG_CPU
/*
 * smp_ops.cpu_kill callback, executed on a surviving CPU: power down
 * the domain of the CPU that just went offline.  Returns 1 ("killed"),
 * as the smp_ops contract expects.
 */
static int rockchip_cpu_kill(unsigned int cpu)
{
	/*
	 * We need a delay here to ensure that the dying CPU can finish
	 * executing v7_coherency_exit() and reach the WFI/WFE state
	 * prior to having the power domain disabled.
	 */
	mdelay(1);

	pmu_set_power_domain(0 + cpu, false);
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 
/*
 * smp_ops.cpu_die callback, executed on the dying CPU itself: flush
 * caches, leave coherency, then idle forever until rockchip_cpu_kill()
 * cuts the power.  Never returns.
 */
static void rockchip_cpu_die(unsigned int cpu)
{
	v7_exit_coherency_flush(louis);
	while (1)
		cpu_do_idle();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
/* SMP operations for rk3036 (no PMU power domains). */
static const struct smp_operations rk3036_smp_ops __initconst = {
	.smp_prepare_cpus	= rk3036_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
/* SMP operations for rk3066-class SoCs (PMU-managed power domains). */
static const struct smp_operations rockchip_smp_ops __initconst = {
	.smp_prepare_cpus	= rockchip_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
/* Bind each "enable-method" DT string to the matching smp ops table. */
CPU_METHOD_OF_DECLARE(rk3036_smp, "rockchip,rk3036-smp", &rk3036_smp_ops);
CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops);