^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * OR1K timer synchronisation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Based on work from MIPS implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * not have done anything significant (but they may have had interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * enabled briefly - prom_smp_finish() should not be responsible for enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * interrupts...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/irqflags.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/timex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/barrier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/spr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) static unsigned int initcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static atomic_t count_count_start = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) static atomic_t count_count_stop = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define COUNTON 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define NR_LOOPS 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
/*
 * Master (boot CPU) side of the counter synchronisation rendezvous.
 *
 * @cpu: logical id of the secondary CPU being brought up; used only
 *       for the progress message.
 *
 * Pairs with synchronise_count_slave() running on the secondary CPU.
 * The two sides handshake NR_LOOPS times through the
 * count_count_start/count_count_stop atomics; on the final pass both
 * sides write the same captured value (initcount) into their local
 * timer, so the counters end up offset by at most one cacheline
 * transfer latency.
 */
void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	pr_info("Synchronize counters for CPU %u: ", cpu);

	/* Keep the handshake timing deterministic on this CPU. */
	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * Wait for the slave's atomic_inc to land
		 * (slaves loop on '!= 2').
		 */
		while (atomic_read(&count_count_start) != 1)
			mb();
		/* Re-arm the stop flag for this pass before releasing the slave. */
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc(&count_count_start);

		/*
		 * Capture the reference value one pass before the last, so
		 * the final pass only has to store it (count will be
		 * initialised to the current timer value).
		 */
		if (i == 1)
			initcount = get_cycles();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			openrisc_timer_set(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		/* Reset the start flag for the next pass, then release the slave. */
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	openrisc_timer_set_next(COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	pr_cont("done.\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
/*
 * Slave (secondary CPU) side of the counter synchronisation rendezvous.
 *
 * @cpu: this CPU's logical id (currently unused in the body).
 *
 * Pairs with synchronise_count_master(): on each pass this side
 * increments count_count_start/count_count_stop and spins until the
 * master's matching increment brings the counter to 2, then on the
 * last pass loads the master-captured initcount into the local timer.
 */
void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		/* Spin until the master has also arrived (it makes this 2). */
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			openrisc_timer_set(initcount);

		atomic_inc(&count_count_stop);
		/* Spin until the master is done with this pass as well. */
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	openrisc_timer_set_next(COUNTON);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #undef NR_LOOPS