^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * arch/arm/plat-iop/time.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Timer code for IOP32x and IOP33x based systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author: Deepak Saxena <dsaxena@mvista.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright 2002-2003 MontaVista Software Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/timex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/clocksource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/clockchips.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/sched_clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/mach/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/mach/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "hardware.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "irqs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * Minimum clocksource/clockevent timer range in seconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define IOP_MIN_RANGE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * IOP clocksource (free-running timer 1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) static u64 notrace iop_clocksource_read(struct clocksource *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) return 0xffffffffu - read_tcr1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) static struct clocksource iop_clocksource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) .name = "iop_timer1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) .rating = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) .read = iop_clocksource_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) .mask = CLOCKSOURCE_MASK(32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) .flags = CLOCK_SOURCE_IS_CONTINUOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * IOP sched_clock() implementation via its clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) static u64 notrace iop_read_sched_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) return 0xffffffffu - read_tcr1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * IOP clockevents (interrupting timer 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) static int iop_set_next_event(unsigned long delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) struct clock_event_device *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) u32 tmr = IOP_TMR_PRIVILEGED | IOP_TMR_RATIO_1_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) BUG_ON(delta == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) write_tmr0(tmr & ~(IOP_TMR_EN | IOP_TMR_RELOAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) write_tcr0(delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) write_tmr0((tmr & ~IOP_TMR_RELOAD) | IOP_TMR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static unsigned long ticks_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) static int iop_set_periodic(struct clock_event_device *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) u32 tmr = read_tmr0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) write_tmr0(tmr & ~IOP_TMR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) write_tcr0(ticks_per_jiffy - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) write_trr0(ticks_per_jiffy - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) write_tmr0(tmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static int iop_set_oneshot(struct clock_event_device *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) u32 tmr = read_tmr0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /* ->set_next_event sets period and enables timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) write_tmr0(tmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) static int iop_shutdown(struct clock_event_device *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) u32 tmr = read_tmr0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) tmr &= ~IOP_TMR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) write_tmr0(tmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static int iop_resume(struct clock_event_device *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) u32 tmr = read_tmr0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) tmr |= IOP_TMR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) write_tmr0(tmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) static struct clock_event_device iop_clockevent = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) .name = "iop_timer0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) .features = CLOCK_EVT_FEAT_PERIODIC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) CLOCK_EVT_FEAT_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) .rating = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) .set_next_event = iop_set_next_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) .set_state_shutdown = iop_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) .set_state_periodic = iop_set_periodic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) .tick_resume = iop_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) .set_state_oneshot = iop_set_oneshot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) iop_timer_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct clock_event_device *evt = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) write_tisr(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) evt->event_handler(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
/* Timer input clock in Hz; set once in iop_init_time(). */
static unsigned long iop_tick_rate;

/*
 * get_iop_tick_rate - return the timer input clock frequency (Hz)
 *
 * Only meaningful after iop_init_time() has run.
 */
unsigned long get_iop_tick_rate(void)
{
	return iop_tick_rate;
}
EXPORT_SYMBOL(get_iop_tick_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
/*
 * iop_init_time - bring up the IOP timer subsystem
 * @tick_rate: timer input clock frequency in Hz
 *
 * Registers sched_clock, sets up timer 0 as the interrupting
 * clockevent and timer 1 as a free-running clocksource. The
 * register-write ordering below is deliberate; do not reorder.
 */
void __init iop_init_time(unsigned long tick_rate)
{
	u32 timer_ctl;
	int irq = IRQ_IOP32X_TIMER0;

	sched_clock_register(iop_read_sched_clock, 32, tick_rate);

	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
	iop_tick_rate = tick_rate;

	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
			IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;

	/*
	 * Set up interrupting clockevent timer 0.
	 */
	/* Keep timer 0 disabled and clear any stale interrupt before
	 * the handler is installed. */
	write_tmr0(timer_ctl & ~IOP_TMR_EN);
	write_tisr(1);
	if (request_irq(irq, iop_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
			"IOP Timer Tick", &iop_clockevent))
		pr_err("Failed to request irq() %d (IOP Timer Tick)\n", irq);
	iop_clockevent.cpumask = cpumask_of(0);
	/* min delta 0xf ticks, max one below full 32-bit range */
	clockevents_config_and_register(&iop_clockevent, tick_rate,
					0xf, 0xfffffffe);

	/*
	 * Set up free-running clocksource timer 1.
	 */
	/* Load reload + count registers before enabling the timer. */
	write_trr1(0xffffffff);
	write_tcr1(0xffffffff);
	write_tmr1(timer_ctl);
	clocksource_register_hz(&iop_clocksource, tick_rate);
}