// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/plat-versatile/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This code is specific to the hardware found on ARM Realview and
 * Versatile Express platforms where the CPUs are unable to be individually
 * woken, and where there is no way to hot-unplug CPUs. Real platforms
 * should not copy this code.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#include <plat/platsmp.h>

/*
 * versatile_cpu_release controls the release of CPUs from the holding
 * pen in headsmp.S, which exists because we are not always able to
 * control the release of individual CPUs from the board firmware.
 * It is volatile because it is also polled by CPUs that are not yet
 * participating in cache coherency. Production platforms do not need
 * this.
 */
volatile int versatile_cpu_release = -1;

/*
 * Write versatile_cpu_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void versatile_write_cpu_release(int val)
{
	versatile_cpu_release = val;
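	/*
	 * Order the store above against the cache maintenance below, then
	 * clean the cache line so that CPUs spinning in the holding pen
	 * with their caches disabled can observe the new value directly
	 * from memory.
	 */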
	smp_wmb();
	sync_cache_w(&versatile_cpu_release);
}

/*
 * versatile_lock exists to avoid running the loops_per_jiffy delay loop
 * calibrations on the secondary CPU while the requesting CPU is using
 * the limited-bandwidth bus - which affects the calibration value.
 * Production platforms do not need this.
 */
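/*
 * A raw spinlock is used because this handshake runs during early
 * secondary bring-up, where sleeping locks must not be taken.
 */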
static DEFINE_RAW_SPINLOCK(versatile_lock);

void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	versatile_write_cpu_release(-1);

	/*
	 * Synchronise with the boot thread: the boot CPU holds
	 * versatile_lock across the whole release sequence, so we only
	 * get past this point (and on to delay calibration) once it has
	 * dropped the lock.
	 */
	raw_spin_lock(&versatile_lock);
	raw_spin_unlock(&versatile_lock);
}

int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	raw_spin_lock(&versatile_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them. However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
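	/*
	 * The holding pen in headsmp.S compares this value against the
	 * hardware CPU ID read from the MPIDR, so write the hardware ID
	 * via cpu_logical_map() rather than the logical CPU number.
	 */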
	versatile_write_cpu_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system-wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	timeout = jiffies + (1 * HZ);
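	/*
	 * Poll for the secondary to write -1 back via
	 * versatile_secondary_init(), signalling that it has left the
	 * pen; give up after roughly one second.
	 */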
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (versatile_cpu_release == -1)
			break;

		udelay(10);
	}

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	raw_spin_unlock(&versatile_lock);

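	/*
	 * If the release value never went back to -1, the secondary did
	 * not leave the holding pen within the timeout.
	 */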
	return versatile_cpu_release != -1 ? -ENOSYS : 0;
}