// SPDX-License-Identifier: GPL-2.0
/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to CPU0's next
 * Count value. This can cause a small timewarp for CPU0. All other CPUs
 * should not have done anything significant (but they may have had
 * interrupts enabled briefly - prom_smp_finish() should not be
 * responsible for enabling interrupts...)
 */

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>

#include <asm/r4k-timer.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

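/*
 * initcount holds the Count value sampled by the master; both CPUs write
 * it to their own Count register on the final pass. count_count_start and
 * count_count_stop implement a two-CPU rendezvous: each side increments
 * the counter and spins until the other side's increment is visible.
 */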
static unsigned int initcount;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);

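/*
 * COUNTON is how many Count ticks in the future the first post-sync timer
 * interrupt is scheduled; NR_LOOPS is the number of rendezvous passes -
 * the early passes warm up the caches, the last one does the real write.
 */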
#define COUNTON 100
#define NR_LOOPS 3

void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	pr_info("Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
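	/*
	 * Per-pass handshake (master on the left, slave on the right):
	 *
	 *   wait for start == 1   <-  atomic_inc(start)     start: 0 -> 1
	 *   stop = 0
	 *   atomic_inc(start)     ->  wait for start == 2   start: 1 -> 2
	 *       (both sides write Count on the final pass)
	 *   wait for stop == 1    <-  atomic_inc(stop)      stop: 0 -> 1
	 *   start = 0
	 *   atomic_inc(stop)      ->  wait for stop == 2    stop: 1 -> 2
	 */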

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc(&count_count_start);

		/*
		 * Sample Count on the second pass; the final pass will
		 * write this recently read value on both CPUs.
		 */
		if (i == 1)
			initcount = read_c0_count();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
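	/*
	 * On R4k-style cores a write to c0_compare also acknowledges a
	 * pending timer interrupt, so this both schedules the next tick
	 * and clears any stale interrupt raised while Count was being
	 * rewritten. The slave does the same below.
	 */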

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	pr_cont("done.\n");
}

void synchronise_count_slave(int cpu)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Not every CPU is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready.
	 */

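	/*
	 * Mirror the master's handshake: announce arrival by bumping
	 * count_count_start, spin until the master's increment makes it 2,
	 * write Count on the final pass, then repeat the dance on
	 * count_count_stop so the master knows we have left the
	 * synchronisation point.
	 */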
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
#undef NR_LOOPS
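
/*
 * Usage sketch (an assumption based on the usual MIPS SMP bring-up flow,
 * not something this file itself defines): the boot CPU would call
 * synchronise_count_master(cpu) from the __cpu_up() path while the
 * incoming secondary calls synchronise_count_slave(cpu) from
 * start_secondary(), so exactly two CPUs meet at the rendezvous at a time.
 */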