^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/arch/alpha/kernel/smp.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Renamed modified smp_call_function to smp_call_function_on_cpu()
 * Created a function that conforms to the old calling convention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * of smp_call_function().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * This is helpful for DCPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/profile.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/hwrpb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include "proto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include "irq_impl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define DEBUG_SMP 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #if DEBUG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define DBGS(args) printk args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define DBGS(args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/* A collection of per-processor data. */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  One word per CPU, each on
   its own cache line so senders on different CPUs don't false-share. */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Boot handshake flag between the boot CPU and a starting secondary:
   -1 = secondary must wait (set in smp_boot_one_cpu),
    0 = secondary may run calibrate_delay,
    1 = secondary is fully up (set at the end of smp_callin). */
static int smp_secondary_alive = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * Called by both boot and secondaries to move global data into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * per-processor storage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) static inline void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) smp_store_cpu_info(int cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) cpu_data[cpuid].need_new_asn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) cpu_data[cpuid].asn_lock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static inline void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) smp_setup_percpu_timer(int cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) cpu_data[cpuid].prof_counter = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) cpu_data[cpuid].prof_multiplier = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) static void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) wait_boot_cpu_to_stop(int cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) unsigned long stop = jiffies + 10*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) while (time_before(jiffies, stop)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) if (!smp_secondary_alive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) for (;;)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * Where secondaries begin a life of C.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	/* A CPU must not call in twice. */
	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks. */
	wrmces(7);

	/* Set trap vectors. */
	trap_init();

	/* Set interrupt vector. */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);
	init_clockevent();

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos. */
	local_irq_enable();

	/* Wait boot CPU to stop with irq enabled before running
	   calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow master to continue only after we've written loops_per_jiffy;
	   the wmb orders the cpu_data store before the alive flag. */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Enter the idle loop; we do not return. */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) wait_for_txrdy (unsigned long cpumask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (!(hwrpb->txrdy & cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) timeout = jiffies + 10*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) while (time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) if (!(hwrpb->txrdy & cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) * Send a message to a secondary's console. "START" is one such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * interesting message. ;-)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	/* Locate the target CPU's per-processor slot in the HWRPB. */
	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	/* Make sure the console has drained any previous message. */
	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	/* ipc_buffer[0] carries the message length; the text itself
	   starts at ipc_buffer[1]. */
	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set -- publish the buffer contents before
	   raising the rxrdy bit that tells the console to read it. */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	/* Wait for the console to consume the message. */
	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * A secondary console wants to send a message. Receive it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	/* Each set bit in txrdy names a CPU with a pending message. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		/* Locate the sender's per-processor slot in the HWRPB. */
		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		/* The length lives in the upper half of ipc_buffer[0].
		   Reject anything that would not fit in buf with its
		   terminating NUL (cnt must be 1..79). */
		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[1];
			cp2 = buf;
			memcpy(cp2, cp1, cnt);
			cp2[cnt] = '\0';

			/* Replace CR (and a following LF) with spaces so
			   the message prints on one line. */
			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	/* Acknowledge all senders at once. */
	hwrpb->txrdy = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * Convince the console to have a secondary cpu begin execution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) */
/*
 * Ask the SRM console to start @cpuid executing __smp_callin on
 * behalf of @idle.  Returns 0 on success, -1 if the console never
 * acknowledges the start within 10 seconds.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	/* Locate the target CPU's per-processor slot in the HWRPB. */
	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console; bit 0 coming
	   back on (cleared just above) is that ACK. */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * Bring one cpu online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) */
/*
 * Bring one cpu online.  Returns 0 when the secondary reports in,
 * -1 if the console fails to start it or it never sets
 * smp_secondary_alive.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned long timeout;

	/* Signal the secondary to wait a moment. */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.
	   The mb orders our prior stores before this release. */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  The secondary sets the
	   flag to 1 at the end of smp_callin(). */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU. */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

alive:
	/* Another "Red Snapper". */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * Called from setup_arch. Detect an SMP system and which processors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * are present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) setup_smp(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) struct percpu_struct *cpubase, *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) if (boot_cpuid != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) boot_cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (hwrpb->nr_processors > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) int boot_cpu_palrev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) DBGS(("setup_smp: nr_processors %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) hwrpb->nr_processors));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) cpubase = (struct percpu_struct *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) ((char*)hwrpb + hwrpb->processor_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) boot_cpu_palrev = cpubase->pal_revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) for (i = 0; i < hwrpb->nr_processors; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) cpu = (struct percpu_struct *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) ((char *)cpubase + i*hwrpb->processor_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if ((cpu->flags & 0x1cc) == 0x1cc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) smp_num_probed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) set_cpu_possible(i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) set_cpu_present(i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) cpu->pal_revision = boot_cpu_palrev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) i, cpu->flags, cpu->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) i, cpu->pal_revision));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) smp_num_probed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
/*
 * Called by smp_init to prepare the secondaries.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) smp_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /* Take care of some initial bookkeeping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) memset(ipi_data, 0, sizeof(ipi_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) current_thread_info()->cpu = boot_cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) smp_store_cpu_info(boot_cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) smp_setup_percpu_timer(boot_cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) /* Nothing to do on a UP box, or when told not to. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (smp_num_probed == 1 || max_cpus == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) init_cpu_possible(cpumask_of(boot_cpuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) init_cpu_present(cpumask_of(boot_cpuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) printk(KERN_INFO "SMP mode deactivated.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) printk(KERN_INFO "SMP starting up secondaries.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) smp_num_cpus = smp_num_probed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
/*
 * Nothing to do here: the boot CPU's bookkeeping is handled in
 * smp_prepare_cpus(), but the generic SMP code requires this hook.
 */
void
smp_prepare_boot_cpu(void)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) __cpu_up(unsigned int cpu, struct task_struct *tidle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) smp_boot_one_cpu(cpu, tidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) return cpu_online(cpu) ? 0 : -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) smp_cpus_done(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) unsigned long bogosum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) for(cpu = 0; cpu < NR_CPUS; cpu++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (cpu_online(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) bogosum += cpu_data[cpu].loops_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) printk(KERN_INFO "SMP: Total of %d processors activated "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) "(%lu.%02lu BogoMIPS).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) num_online_cpus(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) (bogosum + 2500) / (500000/HZ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) ((bogosum + 2500) / (5000/HZ)) % 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) setup_profiling_timer(unsigned int multiplier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) for_each_cpu(i, to_whom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) set_bit(operation, &ipi_data[i].bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) for_each_cpu(i, to_whom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) wripir(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) handle_ipi(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) int this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) unsigned long ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) this_cpu, *pending_ipis, regs->pc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) mb(); /* Order interrupt and bit testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) while ((ops = xchg(pending_ipis, 0)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) mb(); /* Order bit clearing and data access. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) unsigned long which;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) which = ops & -ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) ops &= ~which;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) which = __ffs(which);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) case IPI_RESCHEDULE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) scheduler_ipi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) case IPI_CALL_FUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) generic_smp_call_function_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) case IPI_CPU_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) this_cpu, which);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) } while (ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) mb(); /* Order data access and bit testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) cpu_data[this_cpu].ipi_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if (hwrpb->txrdy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) recv_secondary_console_msg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) smp_send_reschedule(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #ifdef DEBUG_IPI_MSG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (cpu == hard_smp_processor_id())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) "smp_send_reschedule: Sending IPI to self.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) smp_send_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) cpumask_t to_whom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) cpumask_copy(&to_whom, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) cpumask_clear_cpu(smp_processor_id(), &to_whom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) #ifdef DEBUG_IPI_MSG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (hard_smp_processor_id() != boot_cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) send_ipi_message(&to_whom, IPI_CPU_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) void arch_send_call_function_ipi_mask(const struct cpumask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) send_ipi_message(mask, IPI_CALL_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) void arch_send_call_function_single_ipi(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) ipi_imb(void *ignored)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) imb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) smp_imb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) /* Must wait other processors to flush their icache before continue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) on_each_cpu(ipi_imb, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) EXPORT_SYMBOL(smp_imb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) ipi_flush_tlb_all(void *ignored)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) tbia();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) flush_tlb_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /* Although we don't have any data to pass, we do want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) synchronize with the other processors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) on_each_cpu(ipi_flush_tlb_all, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) ipi_flush_tlb_mm(void *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) struct mm_struct *mm = (struct mm_struct *) x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (mm == current->active_mm && !asn_locked())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) flush_tlb_current(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) flush_tlb_other(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) flush_tlb_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) if (mm == current->active_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) flush_tlb_current(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (atomic_read(&mm->mm_users) <= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) int cpu, this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) for (cpu = 0; cpu < NR_CPUS; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if (!cpu_online(cpu) || cpu == this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (mm->context[cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) mm->context[cpu] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) smp_call_function(ipi_flush_tlb_mm, mm, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) EXPORT_SYMBOL(flush_tlb_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) struct flush_tlb_page_struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) ipi_flush_tlb_page(void *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) struct mm_struct * mm = data->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (mm == current->active_mm && !asn_locked())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) flush_tlb_current_page(mm, data->vma, data->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) flush_tlb_other(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct flush_tlb_page_struct data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (mm == current->active_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) flush_tlb_current_page(mm, vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (atomic_read(&mm->mm_users) <= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) int cpu, this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) for (cpu = 0; cpu < NR_CPUS; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (!cpu_online(cpu) || cpu == this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (mm->context[cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) mm->context[cpu] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) data.vma = vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) data.mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) data.addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) smp_call_function(ipi_flush_tlb_page, &data, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) EXPORT_SYMBOL(flush_tlb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* On the Alpha we always flush the whole user tlb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) flush_tlb_mm(vma->vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) EXPORT_SYMBOL(flush_tlb_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ipi_flush_icache_page(void *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct mm_struct *mm = (struct mm_struct *) x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (mm == current->active_mm && !asn_locked())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) __load_new_mm_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) flush_tlb_other(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) unsigned long addr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if ((vma->vm_flags & VM_EXEC) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (mm == current->active_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) __load_new_mm_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (atomic_read(&mm->mm_users) <= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) int cpu, this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) for (cpu = 0; cpu < NR_CPUS; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (!cpu_online(cpu) || cpu == this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (mm->context[cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) mm->context[cpu] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) smp_call_function(ipi_flush_icache_page, mm, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }