// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC time.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/ftrace.h>

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/io.h>

#include <asm/cpuinfo.h>

/* Set the timer tick counter (SPR_TTCR); used by the timer synchronisation routine */
inline void openrisc_timer_set(unsigned long count)
{
	mtspr(SPR_TTCR, count);
}

/* Set the timer to trigger in delta cycles */
inline void openrisc_timer_set_next(unsigned long delta)
{
	u32 c;

	/* Read the 32-bit counter value, add delta, and mask down to the
	 * low 28 bits. We're guaranteed delta won't be bigger than 28 bits
	 * because the generic timekeeping code ensures that for us.
	 */
	c = mfspr(SPR_TTCR);
	c += delta;
	c &= SPR_TTMR_TP;

	/* Set counter and enable interrupt.
	 * Keep timer in continuous mode always.
	 */
	mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c);
}

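/*
 * clockevents set_next_event callback: program the tick timer to raise
 * an interrupt 'delta' cycles from now.
 */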
static int openrisc_timer_set_next_event(unsigned long delta,
					 struct clock_event_device *dev)
{
	openrisc_timer_set_next(delta);
	return 0;
}

/* This is the clock event device based on the OR1K tick timer.
 * As the timer is being used as a continuous clock-source (required for
 * high-resolution timers) we cannot enable the PERIODIC feature. The tick
 * timer can run using one-shot events, so no problem.
 */
DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer);

void openrisc_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt =
		&per_cpu(clockevent_openrisc_timer, cpu);
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu];

	mtspr(SPR_TTMR, SPR_TTMR_CR);

#ifdef CONFIG_SMP
	evt->broadcast = tick_broadcast;
#endif
	evt->name = "openrisc_timer_clockevent";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 300;
	evt->set_next_event = openrisc_timer_set_next_event;

	evt->cpumask = cpumask_of(cpu);

	/* The timer period field is only 28 bits wide, so cap max_delta */
	clockevents_config_and_register(evt, cpuinfo->clock_frequency,
					100, 0x0fffffff);
}

static inline void timer_ack(void)
{
	/* Clear the IP bit and disable further interrupts.
	 * We just need to keep the timer running, so maintain the CR bit
	 * while clearing the rest of the register.
	 */
	mtspr(SPR_TTMR, SPR_TTMR_CR);
}

/*
 * The timer interrupt is mostly handled in generic code nowadays... this
 * function just acknowledges the interrupt and fires the event handler that
 * has been set on the clockevent device by the generic time management code.
 *
 * This function needs to be called by the timer exception handler and that's
 * all the exception handler needs to do.
 */
irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt =
		&per_cpu(clockevent_openrisc_timer, cpu);

	timer_ack();

	/*
	 * update_process_times() expects us to have called irq_enter().
	 */
	irq_enter();
	evt->event_handler(evt);
	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

/*
 * Clocksource: Based on OpenRISC timer/counter
 *
 * This sets up the OpenRISC Tick Timer as a clock source. The tick timer
 * is 32 bits wide and runs at the CPU clock frequency.
 */
static u64 openrisc_timer_read(struct clocksource *cs)
{
	return (u64) mfspr(SPR_TTCR);
}

static struct clocksource openrisc_timer = {
	.name = "openrisc_timer",
	.rating = 200,
	.read = openrisc_timer_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

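/*
 * Register the tick timer as a clocksource running at the CPU clock
 * frequency and start it in continuous mode with interrupts disabled.
 */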
static int __init openrisc_timer_init(void)
{
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	if (clocksource_register_hz(&openrisc_timer, cpuinfo->clock_frequency))
		panic("failed to register clocksource");

	/* Enable the incrementer: 'continuous' mode with interrupt disabled */
	mtspr(SPR_TTMR, SPR_TTMR_CR);

	return 0;
}

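/*
 * Architecture time initialisation: verify that the CPU implements the
 * tick timer (SPR_UPR_TTP), then register the clocksource and the per-CPU
 * clock event device.
 */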
void __init time_init(void)
{
	u32 upr;

	upr = mfspr(SPR_UPR);
	if (!(upr & SPR_UPR_TTP))
		panic("Linux not supported on devices without tick timer");

	openrisc_timer_init();
	openrisc_clockevent_init();
}