^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * License. See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/sched/hotplug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/octeon/octeon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "octeon_boot.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
/*
 * Boot-handshake words polled by secondary cores in their startup stub.
 * octeon_boot_secondary() publishes the new CPU's stack and thread-info
 * pointers here, then writes the target core id to octeon_processor_boot
 * (0xff meaning "no core selected" -- see play_dead()) and waits for
 * octeon_processor_sp to be cleared by the secondary as acknowledgement.
 */
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
#ifdef CONFIG_RELOCATABLE
/* Entry point of the relocated kernel, published by plat_post_relocation(). */
volatile unsigned long octeon_processor_relocated_kernel_entry;
#endif /* CONFIG_RELOCATABLE */

#ifdef CONFIG_HOTPLUG_CPU
/* Bootloader re-entry address (InitTLBStart), captured from the LABI block. */
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

/* Kernel entry point; start_after_reset() jumps here on hot-plugged cores. */
extern void kernel_entry(unsigned long arg1, ...);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
/* Synchronize this core's instruction cache via the MIPS "synci" op. */
static void octeon_icache_flush(void)
{
	asm volatile ("synci 0($0)\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/*
 * IPI dispatch table, indexed by mailbox bit number.  Slot order must
 * match SMP_RESCHEDULE_YOURSELF (bit 0), SMP_CALL_FUNCTION (bit 1) and
 * SMP_ICACHE_FLUSH (bit 2); mailbox_interrupt() has BUILD_BUG_ON()s
 * enforcing this.  Unused slots are NULL and are skipped on dispatch.
 */
static void (*octeon_message_functions[8])(void) = {
	scheduler_ipi,
	generic_smp_call_function_interrupt,
	octeon_icache_flush,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) u64 action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Make sure the function array initialization remains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * Load the mailbox register to figure out what we're supposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) action = cvmx_read_csr(mbox_clrx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (OCTEON_IS_MODEL(OCTEON_CN68XX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) action &= 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) action &= 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) /* Clear the mailbox to clear the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) cvmx_write_csr(mbox_clrx, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) if (action & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) void (*fn)(void) = octeon_message_functions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) if (fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) fn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) action >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
/**
 * Send an IPI to a single CPU: setting the requested action bits in the
 * target core's mailbox SET register raises its mailbox interrupt.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(cpu_logical_map(cpu)), action);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static inline void octeon_send_ipi_mask(const struct cpumask *mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) unsigned int action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) for_each_cpu(i, mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) octeon_send_ipi_single(i, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
/**
 * Detect whether the bootloader supports CPU hotplug (valid LABI block)
 * and record its re-entry address for later secondary-core bring-up.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	/* Nothing to do when SMP was disabled on the command line. */
	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.\n");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
/*
 * Detect available CPUs: populate cpu_possible/cpu_present and the
 * coreid<->logical-cpu translation tables, then let
 * octeon_smp_hotplug_setup() record bootloader hotplug state.
 */
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	/* The booting core always becomes logical CPU 0. */
	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		/* Cores outside the boot coremask: possible but not present. */
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #ifdef CONFIG_RELOCATABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) int plat_post_relocation(long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) unsigned long entry = (unsigned long)kernel_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /* Send secondaries into relocated kernel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) octeon_processor_relocated_kernel_entry = entry + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #endif /* CONFIG_RELOCATABLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * Firmware CPU startup hook
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) static int octeon_boot_secondary(int cpu, struct task_struct *idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) cpu_logical_map(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) octeon_processor_sp = __KSTK_TOS(idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) octeon_processor_gp = (unsigned long)(task_thread_info(idle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) octeon_processor_boot = cpu_logical_map(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) count = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) while (octeon_processor_sp && count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) /* Waiting for processor to get the SP and GP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) pr_err("Secondary boot timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	/*
	 * Point this core's exception base at the kernel's ebase.  BEV
	 * is raised while EBASE is rewritten so that any exception taken
	 * meanwhile uses the boot vectors; the old status is restored
	 * afterwards.
	 */
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	/* Per-core BIST check and cycle-count setup (octeon helpers). */
	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	/* Set up this core's interrupt handling (mailbox IPIs etc.). */
	octeon_irq_setup_secondary();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * Callout to firmware before smp_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) static void __init octeon_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * Only the low order mailbox bits are used for IPIs, leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * the other bits alone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) mailbox_interrupt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	/* Per-core user I/O setup (octeon helper). */
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
/* State of each CPU; set to CPU_DEAD by play_dead(). */
static DEFINE_PER_CPU(int, cpu_state);

/*
 * Take the current CPU offline: drop it from the online mask, fix up
 * IRQ routing, and flush caches/TLB so no stale state remains.
 * Returns -EBUSY for CPU 0 and -ENOTSUPP when the bootloader lacks
 * hotplug support (no entry address was recorded at boot).
 */
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* The boot CPU can never be unplugged. */
	if (cpu == 0)
		return -EBUSY;

	/* Without the bootloader entry point the core cannot be restarted. */
	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
/*
 * Called on a surviving CPU to finish offlining @cpu: wait for it to
 * reach CPU_DEAD (set in play_dead()), return its core to the
 * bootloader's available-core mask, then pulse the core's reset line.
 */
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a bit complicated strategy of getting/setting the
	 * available cores mask, copied from the bootloader.
	 */

	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		/* No named block: fall back to the fixed-address LABI struct. */
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		       /* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
	/* Pulse reset for this core only: assert, then deassert. */
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
/*
 * Final code run by a CPU going offline: flag ourselves CPU_DEAD so
 * octeon_cpu_die() can proceed, then spin until that CPU resets this
 * core via CVMX_CIU_PP_RST.
 */
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	/* Reset the boot mailbox to the "no core selected" value. */
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	/* Make the state visible before another CPU acts on it. */
	mb();

	while (1)	/* core will be reset here */
		;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
/*
 * First kernel code a hot-plugged core runs after the bootloader
 * restarts it (installed as app_start_func_addr in the boot vector).
 */
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
/*
 * CPU-hotplug "prepare" callback: arm the bootloader's boot vector so
 * the given CPU's core, once released, re-enters the kernel through
 * start_after_reset().  Always returns 0.
 */
static int octeon_update_boot_vector(unsigned int cpu)
{

	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		/* No named block: fall back to the fixed-address LABI struct. */
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		/* Claim the core: clear its bit from the available mask. */
		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		       /* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume, that caught by simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	/* Publish the vector before kicking the core. */
	mb();

	/* NMI the core only if it was sitting in the available mask. */
	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
/*
 * Register octeon_update_boot_vector() as a CPU-hotplug "prepare"
 * callback so each core's boot vector is armed before bring-up.
 */
static int register_cavium_notifier(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
					 "mips/cavium:prepare",
					 octeon_update_boot_vector, NULL);
}
late_initcall(register_cavium_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) #endif /* CONFIG_HOTPLUG_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
/* SMP ops for pre-CIU3 Octeon chips (CIU mailbox based IPIs). */
static const struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
/* CIU3 mailbox IRQ 0: reschedule IPI. */
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
/* CIU3 mailbox IRQ 1: smp_call_function IPI. */
static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
/* CIU3 mailbox IRQ 2: icache flush IPI. */
static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
	octeon_icache_flush();
	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * Callout to firmware before smp_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (request_irq(OCTEON_IRQ_MBOX0 + 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) octeon_78xx_reched_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) octeon_78xx_reched_interrupt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) panic("Cannot request_irq for SchedulerIPI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) if (request_irq(OCTEON_IRQ_MBOX0 + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) octeon_78xx_call_function_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) octeon_78xx_call_function_interrupt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) panic("Cannot request_irq for SMP-Call");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (request_irq(OCTEON_IRQ_MBOX0 + 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) octeon_78xx_icache_flush_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) octeon_78xx_icache_flush_interrupt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) panic("Cannot request_irq for ICache-Flush");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
/*
 * Send an IPI to @cpu on CIU3-based chips: each of the 8 action bits
 * maps to its own mailbox interrupt.
 */
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
	unsigned int bit;

	for (bit = 0; bit < 8; bit++) {
		if (action & (1u << bit))
			octeon_ciu3_mbox_send(cpu, bit);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) unsigned int action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) for_each_cpu(cpu, mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) octeon_78xx_send_ipi_single(cpu, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
/* SMP ops for CIU3-based chips (78xx family); differs only in IPI plumbing. */
static const struct plat_smp_ops octeon_78xx_smp_ops = {
	.send_ipi_single	= octeon_78xx_send_ipi_single,
	.send_ipi_mask		= octeon_78xx_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_78xx_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) void __init octeon_setup_smp(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) const struct plat_smp_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (octeon_has_feature(OCTEON_FEATURE_CIU3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) ops = &octeon_78xx_smp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) ops = &octeon_smp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) register_smp_ops(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }