// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Actions Semi Owl timer
 *
 * Copyright 2012 Actions Semi Inc.
 * Author: Actions Semi, Inc.
 *
 * Copyright (c) 2017 SUSE Linux GmbH
 * Author: Andreas Färber
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define OWL_Tx_CTL		0x0
#define OWL_Tx_CMP		0x4
#define OWL_Tx_VAL		0x8

#define OWL_Tx_CTL_PD		BIT(0)
#define OWL_Tx_CTL_INTEN	BIT(1)
#define OWL_Tx_CTL_EN		BIT(2)

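/*
 * The Owl timer block exposes multiple identical timers, each with its
 * own CTL/CMP/VAL registers. Two of them are used here: the timer at
 * offset 0x08 as a free-running 32-bit clocksource and sched_clock, and
 * the timer at offset 0x14 ("timer1" in the devicetree) as the oneshot
 * clock event device. The pointers below hold the mapped base of the
 * whole block and of the two per-timer register windows.
 */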
static void __iomem *owl_timer_base;
static void __iomem *owl_clksrc_base;
static void __iomem *owl_clkevt_base;

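/* Stop a timer and clear its counter and compare registers. */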
static inline void owl_timer_reset(void __iomem *base)
{
	writel(0, base + OWL_Tx_CTL);
	writel(0, base + OWL_Tx_VAL);
	writel(0, base + OWL_Tx_CMP);
}

static inline void owl_timer_set_enabled(void __iomem *base, bool enabled)
{
	u32 ctl = readl(base + OWL_Tx_CTL);

	/*
	 * The PD (pending) bit is write-one-to-clear; mask it out so
	 * writing CTL back does not ack a pending interrupt.
	 */
	ctl &= ~OWL_Tx_CTL_PD;

	if (enabled)
		ctl |= OWL_Tx_CTL_EN;
	else
		ctl &= ~OWL_Tx_CTL_EN;

	writel(ctl, base + OWL_Tx_CTL);
}

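/*
 * sched_clock read callback: return the current count of the free-running
 * clocksource timer, which counts up and wraps at 32 bits.
 */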
static u64 notrace owl_timer_sched_read(void)
{
	return (u64)readl(owl_clksrc_base + OWL_Tx_VAL);
}

static int owl_timer_set_state_shutdown(struct clock_event_device *evt)
{
	owl_timer_set_enabled(owl_clkevt_base, false);

	return 0;
}

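/*
 * Entering oneshot mode only stops and clears the timer; the actual
 * programming happens per event in owl_timer_set_next_event().
 */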
static int owl_timer_set_state_oneshot(struct clock_event_device *evt)
{
	owl_timer_reset(owl_clkevt_base);

	return 0;
}

static int owl_timer_tick_resume(struct clock_event_device *evt)
{
	return 0;
}

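/*
 * Program the next event: stop the timer, enable its interrupt, restart
 * the counter from zero and let it run up to the compare value.
 */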
static int owl_timer_set_next_event(unsigned long evt,
				    struct clock_event_device *ev)
{
	void __iomem *base = owl_clkevt_base;

	owl_timer_set_enabled(base, false);
	writel(OWL_Tx_CTL_INTEN, base + OWL_Tx_CTL);
	writel(0, base + OWL_Tx_VAL);
	writel(evt, base + OWL_Tx_CMP);
	owl_timer_set_enabled(base, true);

	return 0;
}

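/*
 * Oneshot-only clock event device. CLOCK_EVT_FEAT_DYNIRQ indicates that
 * the interrupt affinity of this device may be changed dynamically by the
 * tick broadcast core, so the CPU with the earliest event can take the
 * interrupt directly instead of being woken via IPI.
 */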
static struct clock_event_device owl_clockevent = {
	.name			= "owl_tick",
	.rating			= 200,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_DYNIRQ,
	.set_state_shutdown	= owl_timer_set_state_shutdown,
	.set_state_oneshot	= owl_timer_set_state_oneshot,
	.tick_resume		= owl_timer_tick_resume,
	.set_next_event		= owl_timer_set_next_event,
};

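/*
 * Timer1 interrupt handler: acknowledge the interrupt by writing the
 * write-one-to-clear PD bit, then forward the tick to the event handler.
 */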
static irqreturn_t owl_timer1_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = (struct clock_event_device *)dev_id;

	writel(OWL_Tx_CTL_PD, owl_clkevt_base + OWL_Tx_CTL);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

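/*
 * Probe and set up both timers: map the register block, look up the
 * timer1 interrupt and the input clock (for its rate), register the
 * clocksource timer as clocksource and sched_clock, and register the
 * clock event timer driven by the timer1 interrupt.
 */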
static int __init owl_timer_init(struct device_node *node)
{
	struct clk *clk;
	unsigned long rate;
	int timer1_irq, ret;

	owl_timer_base = of_io_request_and_map(node, 0, "owl-timer");
	if (IS_ERR(owl_timer_base)) {
		pr_err("Can't map timer registers\n");
		return PTR_ERR(owl_timer_base);
	}

	owl_clksrc_base = owl_timer_base + 0x08;
	owl_clkevt_base = owl_timer_base + 0x14;

	timer1_irq = of_irq_get_byname(node, "timer1");
	if (timer1_irq <= 0) {
		pr_err("Can't parse timer1 IRQ\n");
		return -EINVAL;
	}

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		pr_err("Failed to get clock for clocksource (%d)\n", ret);
		return ret;
	}

	rate = clk_get_rate(clk);

	owl_timer_reset(owl_clksrc_base);
	owl_timer_set_enabled(owl_clksrc_base, true);

	sched_clock_register(owl_timer_sched_read, 32, rate);
	ret = clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name,
				    rate, 200, 32, clocksource_mmio_readl_up);
	if (ret) {
		pr_err("Failed to register clocksource (%d)\n", ret);
		return ret;
	}

	owl_timer_reset(owl_clkevt_base);

	ret = request_irq(timer1_irq, owl_timer1_interrupt, IRQF_TIMER,
			  "owl-timer", &owl_clockevent);
	if (ret) {
		pr_err("failed to request irq %d\n", timer1_irq);
		return ret;
	}

	owl_clockevent.cpumask = cpumask_of(0);
	owl_clockevent.irq = timer1_irq;

	clockevents_config_and_register(&owl_clockevent, rate,
					0xf, 0xffffffff);

	return 0;
}
TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);