/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/mmu_context.h>

#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/mips-extns.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>

#if defined(CONFIG_CPU_XLP)
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/pic.h>
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/xlr.h>
#else
#error "Unknown CPU"
#endif

void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
        unsigned int hwtid;
        uint64_t picbase;

        /* node id is part of hwtid, and needed for send_ipi */
        hwtid = cpu_logical_map(logical_cpu);
        picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;

        if (action & SMP_CALL_FUNCTION)
                nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_FUNCTION, 0);
        if (action & SMP_RESCHEDULE_YOURSELF)
                nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_RESCHEDULE, 0);
}
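
/*
 * Illustrative note (not from this file): the hardware thread id encodes the
 * node, so the helpers used above are expected to behave roughly like
 *
 *      node    = hwtid / NLM_CPUS_PER_NODE;    -- owning node/chip
 *      picbase = nlm_get_node(node)->picbase;  -- that node's PIC
 *
 * The actual definitions of nlm_hwtid_to_node() and nlm_get_node() live in
 * the NetLogic headers included above (likely <asm/netlogic/common.h>); the
 * sketch only shows why a hardware thread id alone is enough to route an IPI
 * to the right PIC.
 */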

/* send an IPI to every CPU set in @mask */
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        int cpu;

        for_each_cpu(cpu, mask) {
                nlm_send_ipi_single(cpu, action);
        }
}

/* IRQ_IPI_SMP_FUNCTION handler */
void nlm_smp_function_ipi_handler(struct irq_desc *desc)
{
        unsigned int irq = irq_desc_get_irq(desc);
        clear_c0_eimr(irq);
        ack_c0_eirr(irq);
        generic_smp_call_function_interrupt();
        set_c0_eimr(irq);
}

/* IRQ_IPI_SMP_RESCHEDULE handler */
void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
{
        unsigned int irq = irq_desc_get_irq(desc);
        clear_c0_eimr(irq);
        ack_c0_eirr(irq);
        scheduler_ipi();
        set_c0_eimr(irq);
}
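
/*
 * Both handlers follow the same pattern: mask the source in the extended
 * interrupt mask register (EIMR), acknowledge it in the extended interrupt
 * request register (EIRR), run the generic SMP callback, then unmask.  The
 * set/clear/ack helpers are NetLogic CP0 extensions (see
 * <asm/netlogic/mips-extns.h>, included above).
 */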

/*
 * Early CPU init, called on the secondary before entering generic MIPS code
 */
void nlm_early_init_secondary(int cpu)
{
        change_c0_config(CONF_CM_CMASK, 0x3);
#ifdef CONFIG_CPU_XLP
        xlp_mmu_init();
#endif
        write_c0_ebase(nlm_current_node()->ebase);
}
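
/*
 * Interpretation (not from this file): the change_c0_config() call above sets
 * the KSEG0 cache coherency attribute field of the CP0 Config register, and
 * the EBASE write points the secondary's exception vectors at the exception
 * base recorded in its node structure, so each CPU takes exceptions from its
 * node's setup.
 */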

/*
 * Code to run on secondary just after probing the CPU
 */
static void nlm_init_secondary(void)
{
        int hwtid;

        hwtid = hard_smp_processor_id();
        cpu_set_core(&current_cpu_data, hwtid / NLM_THREADS_PER_CORE);
        current_cpu_data.package = nlm_nodeid();
        nlm_percpu_init(hwtid);
        nlm_smp_irq_init(hwtid);
}
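
/*
 * Worked example (assuming NLM_THREADS_PER_CORE == 4, as on XLR/XLP): a
 * secondary with hardware thread id 13 is thread 1 of core 3, so
 * cpu_set_core() above records core 13 / 4 = 3, and the package is whatever
 * node that core sits on, as reported by nlm_nodeid().
 */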

void nlm_prepare_cpus(unsigned int max_cpus)
{
        /* declare we are SMT capable */
        smp_num_siblings = nlm_threads_per_core;
}

void nlm_smp_finish(void)
{
        local_irq_enable();
}

/*
 * Boot all other CPUs in the system: hand each secondary the stack and gp of
 * its idle task, then bring it into the boot function with an NMI.
 */
unsigned long nlm_next_gp;
unsigned long nlm_next_sp;
static cpumask_t phys_cpu_present_mask;

int nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
        uint64_t picbase;
        int hwtid;

        hwtid = cpu_logical_map(logical_cpu);
        picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;

        nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
        nlm_next_gp = (unsigned long)task_thread_info(idle);

        /* barrier for sp/gp store above */
        __sync();
        nlm_pic_send_ipi(picbase, hwtid, 1, 1);	/* NMI */

        return 0;
}
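
/*
 * Hand-off sketch (assumption, based on the cpu_ready comment below which
 * points at smpboot.S): the master stores the new stack and gp in
 * nlm_next_sp/nlm_next_gp, issues __sync() so those stores are visible, and
 * then sends an NMI through the PIC; the secondary's NMI path is expected to
 * pick up the two values and enter the kernel with them.
 */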

void __init nlm_smp_setup(void)
{
        unsigned int boot_cpu;
        int num_cpus, i, ncore, node;
        volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);

        boot_cpu = hard_smp_processor_id();
        cpumask_clear(&phys_cpu_present_mask);

        cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
        __cpu_number_map[boot_cpu] = 0;
        __cpu_logical_map[0] = boot_cpu;
        set_cpu_possible(0, true);

        num_cpus = 1;
        for (i = 0; i < NR_CPUS; i++) {
                /*
                 * cpu_ready array is not set for the boot_cpu,
                 * it is only set for ASPs (see smpboot.S)
                 */
                if (cpu_ready[i]) {
                        cpumask_set_cpu(i, &phys_cpu_present_mask);
                        __cpu_number_map[i] = num_cpus;
                        __cpu_logical_map[num_cpus] = i;
                        set_cpu_possible(num_cpus, true);
                        node = nlm_hwtid_to_node(i);
                        cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
                        ++num_cpus;
                }
        }

        pr_info("Physical CPU mask: %*pb\n",
                cpumask_pr_args(&phys_cpu_present_mask));
        pr_info("Possible CPU mask: %*pb\n",
                cpumask_pr_args(cpu_possible_mask));

        /* count the cores we have woken up */
        for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
                ncore += hweight32(nlm_get_node(i)->coremask);

        pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
                nlm_threads_per_core, num_cpus);

        /* install the NMI handler used to boot the secondary CPUs */
        nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}
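
/*
 * Illustrative example: if the boot CPU is hardware thread 0 and threads 4-7
 * were marked ready in cpu_ready[], the maps built above end up as
 *
 *      __cpu_logical_map[] = { 0, 4, 5, 6, 7, ... }
 *      __cpu_number_map[4] = 1, __cpu_number_map[5] = 2, ...
 *
 * i.e. logical CPU numbers are dense even when hardware thread ids are sparse.
 */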

/*
 * Derive the thread mode from the wakeup mask; every woken core must use the
 * same thread configuration as core 0.
 */
static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
{
        uint32_t core0_thr_mask, core_thr_mask;
        int threadmode, i, j;

        core0_thr_mask = 0;
        for (i = 0; i < NLM_THREADS_PER_CORE; i++)
                if (cpumask_test_cpu(i, wakeup_mask))
                        core0_thr_mask |= (1 << i);
        switch (core0_thr_mask) {
        case 1:
                nlm_threads_per_core = 1;
                threadmode = 0;
                break;
        case 3:
                nlm_threads_per_core = 2;
                threadmode = 2;
                break;
        case 0xf:
                nlm_threads_per_core = 4;
                threadmode = 3;
                break;
        default:
                goto unsupp;
        }

        /* Verify other cores' CPU masks */
        for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
                core_thr_mask = 0;
                for (j = 0; j < NLM_THREADS_PER_CORE; j++)
                        if (cpumask_test_cpu(i + j, wakeup_mask))
                                core_thr_mask |= (1 << j);
                if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
                        goto unsupp;
        }
        return threadmode;

unsupp:
        panic("Unsupported CPU mask %*pb", cpumask_pr_args(wakeup_mask));
        return 0;
}
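
/*
 * Worked example: with 4 threads per core, a wakeup mask of 0x00ff enables
 * cores 0 and 1 with all four threads each; core0_thr_mask is 0xf, so
 * nlm_threads_per_core becomes 4 and threadmode 3.  A mask of 0x0013 would
 * panic, because core 1 would run a single thread while core 0 runs two.
 */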

int nlm_wakeup_secondary_cpus(void)
{
        u32 *reset_data;
        int threadmode;

        /* verify the mask and setup core config variables */
        threadmode = nlm_parse_cpumask(&nlm_cpumask);

        /* Setup CPU init parameters */
        reset_data = nlm_get_boot_data(BOOT_THREAD_MODE);
        *reset_data = threadmode;

#ifdef CONFIG_CPU_XLP
        xlp_wakeup_secondary_cpus();
#else
        xlr_wakeup_secondary_cpus();
#endif
        return 0;
}

const struct plat_smp_ops nlm_smp_ops = {
        .send_ipi_single = nlm_send_ipi_single,
        .send_ipi_mask = nlm_send_ipi_mask,
        .init_secondary = nlm_init_secondary,
        .smp_finish = nlm_smp_finish,
        .boot_secondary = nlm_boot_secondary,
        .smp_setup = nlm_smp_setup,
        .prepare_cpus = nlm_prepare_cpus,
};
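
/*
 * These ops are the platform hooks used by the generic MIPS SMP code; on
 * NetLogic boards they are expected to be registered during early platform
 * setup via register_smp_ops(&nlm_smp_ops) (assumption: that call lives in
 * the platform setup code, not in this file).
 */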