Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

arch/riscv/kernel/smpboot.c (git blame: every line is from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/smp.h>

#include "head.h"

static DECLARE_COMPLETION(cpu_running);

void __init smp_prepare_boot_cpu(void)
{
	init_cpu_topology();
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpuid;
	int ret;

	/* This covers non-smp usecase mandated by "nosmp" option */
	if (max_cpus == 0)
		return;

	for_each_possible_cpu(cpuid) {
		if (cpuid == smp_processor_id())
			continue;
		if (cpu_ops[cpuid]->cpu_prepare) {
			ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
			if (ret)
				continue;
		}
		set_cpu_present(cpuid, true);
	}
}
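/*
 * For reference, not part of this file: smp_prepare_cpus() above and
 * start_secondary_cpu() below dispatch through a per-CPU method table,
 * cpu_ops[], of type struct cpu_operations from <asm/cpu_ops.h>. A
 * sketch of its shape in this kernel generation, under a hypothetical
 * name so it cannot clash with the real header; the hotplug hooks are
 * an assumption based on the upstream header of this era:
 */
struct cpu_operations_sketch {
	const char	*name;					/* boot method name, e.g. "sbi" */
	int		(*cpu_prepare)(unsigned int cpu);	/* early per-cpu setup; nonzero keeps the cpu !present */
	int		(*cpu_start)(unsigned int cpu,		/* actually kick the hart into the kernel */
				     struct task_struct *tidle);
#ifdef CONFIG_HOTPLUG_CPU
	void		(*cpu_disable)(unsigned int cpu);
	void		(*cpu_stop)(void);
	int		(*cpu_is_stopped)(unsigned int cpu);
#endif
};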
void __init setup_smp(void)
{
	struct device_node *dn;
	int hart;
	bool found_boot_cpu = false;
	int cpuid = 1;

	cpu_set_ops(0);

	for_each_of_cpu_node(dn) {
		hart = riscv_of_processor_hartid(dn);
		if (hart < 0)
			continue;

		if (hart == cpuid_to_hartid_map(0)) {
			BUG_ON(found_boot_cpu);
			found_boot_cpu = 1;
			continue;
		}
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
				cpuid, hart);
			break;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		cpuid++;
	}

	BUG_ON(!found_boot_cpu);

	if (cpuid > nr_cpu_ids)
		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
			cpuid, nr_cpu_ids);

	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
			cpu_set_ops(cpuid);
			set_cpu_possible(cpuid, true);
		}
	}
}
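/*
 * Note on the assignment above: cpuid_to_hartid_map(cpuid) = hart works
 * because, in <asm/smp.h> of this kernel generation, the "function" is
 * a macro over a plain array indexed by logical cpu id, so the call
 * expands to an lvalue:
 *
 *	extern unsigned long __cpuid_to_hartid_map[NR_CPUS];
 *	#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu]
 *
 * Logical cpu 0 is always the boot hart; the remaining harts found in
 * the device tree get logical ids 1..N in discovery order.
 */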
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
	if (cpu_ops[cpu]->cpu_start)
		return cpu_ops[cpu]->cpu_start(cpu, tidle);

	return -EOPNOTSUPP;
}
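/*
 * For reference, not part of this file: a sketch of what a ->cpu_start()
 * hook looks like, modeled on the SBI HSM variant in upstream
 * arch/riscv/kernel/cpu_ops_sbi.c of this era (assumption: this tree
 * matches upstream; sbi_hsm_hart_start() is file-local there and is only
 * assumed visible here for illustration). It resolves the logical cpu to
 * a hartid, stashes the idle task and stack for the new hart, and asks
 * the SBI firmware to start the hart at the secondary entry point.
 */
static int example_sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
	int hartid = cpuid_to_hartid_map(cpuid);

	cpu_update_secondary_bootdata(cpuid, tidle);
	return sbi_hsm_hart_start(hartid, boot_addr, 0);
}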
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;
	tidle->thread_info.cpu = cpu;

	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_crit("CPU%u: failed to start\n", cpu);
	}

	return ret;
}
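/*
 * The bring-up handshake above pairs wait_for_completion_timeout() with
 * the complete(&cpu_running) call in smp_callin() below; note that the
 * timeout alone is not treated as failure, only the CPU still being
 * offline afterwards is. A minimal sketch of the same pattern in
 * isolation (hypothetical names, same 1-second budget):
 */
static DECLARE_COMPLETION(example_running);

static int example_waiter(void)
{
	/* wait_for_completion_timeout() returns 0 on timeout,
	 * otherwise the number of jiffies left. */
	if (!wait_for_completion_timeout(&example_running,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}

static void example_callin(void)
{
	complete(&example_running);	/* wakes the waiter in example_waiter() */
}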
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * C entry point for a secondary processor.
 */
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int curr_cpuid = smp_processor_id();

	riscv_clear_ipi();

	/* All kernel threads share the same mm context.  */
	mmgrab(mm);
	current->active_mm = mm;

	notify_cpu_starting(curr_cpuid);
	update_siblings_masks(curr_cpuid);
	set_cpu_online(curr_cpuid, 1);

	/*
	 * Remote TLB flushes are ignored while the CPU is offline, so emit
	 * a local TLB flush right now just in case.
	 */
	local_flush_tlb_all();
	complete(&cpu_running);
	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 */
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}