^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Ralink RT2880 timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Author: John Crispin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2013 John Crispin <john@phrozen.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/of_gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/mach-ralink/ralink_regs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
/* Register offsets within the timer block */
#define TIMER_REG_TMRSTAT 0x00	/* interrupt status (write 1 to ack) */
#define TIMER_REG_TMR0LOAD 0x10	/* timer 0 reload/count-down value */
#define TIMER_REG_TMR0CTL 0x18	/* timer 0 control */

#define TMRSTAT_TMR0INT BIT(0)	/* timer 0 interrupt pending */

#define TMR0CTL_ENABLE BIT(7)		/* start the countdown */
#define TMR0CTL_MODE_PERIODIC BIT(4)	/* auto-reload (periodic) mode */
#define TMR0CTL_PRESCALER 1
/* prescale field is encoded inverted: 0xf selects divide-by-2^1 here */
#define TMR0CTL_PRESCALE_VAL (0xf - TMR0CTL_PRESCALER)
/* effective input-clock divider produced by the prescaler setting */
#define TMR0CTL_PRESCALE_DIV (65536 / BIT(TMR0CTL_PRESCALER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
/* Per-device state for one RT2880 hardware timer instance. */
struct rt_timer {
	struct device *dev;		/* owning platform device */
	void __iomem *membase;		/* mapped timer register block */
	int irq;			/* timer interrupt line */
	unsigned long timer_freq;	/* input clock rate / TMR0CTL_PRESCALE_DIV (Hz) */
	unsigned long timer_div;	/* requested divisor, clamped to timer_freq */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) __raw_writel(val, rt->membase + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) static inline u32 rt_timer_r32(struct rt_timer *rt, u8 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) return __raw_readl(rt->membase + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static irqreturn_t rt_timer_irq(int irq, void *_rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct rt_timer *rt = (struct rt_timer *) _rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) rt_timer_w32(rt, TIMER_REG_TMRSTAT, TMRSTAT_TMR0INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static int rt_timer_request(struct rt_timer *rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) int err = request_irq(rt->irq, rt_timer_irq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) dev_name(rt->dev), rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) dev_err(rt->dev, "failed to request irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) u32 t = TMR0CTL_MODE_PERIODIC | TMR0CTL_PRESCALE_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (rt->timer_freq < divisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) rt->timer_div = rt->timer_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) rt->timer_div = divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static int rt_timer_enable(struct rt_timer *rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) u32 t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) t |= TMR0CTL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) static int rt_timer_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct rt_timer *rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) rt = devm_kzalloc(&pdev->dev, sizeof(*rt), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (!rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) dev_err(&pdev->dev, "failed to allocate memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) rt->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (rt->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) return rt->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) rt->membase = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) if (IS_ERR(rt->membase))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) return PTR_ERR(rt->membase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) if (IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) dev_err(&pdev->dev, "failed get clock rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) return PTR_ERR(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) rt->timer_freq = clk_get_rate(clk) / TMR0CTL_PRESCALE_DIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (!rt->timer_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) rt->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) platform_set_drvdata(pdev, rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) rt_timer_request(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) rt_timer_config(rt, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) rt_timer_enable(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) dev_info(&pdev->dev, "maximum frequency is %luHz\n", rt->timer_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
/* Devicetree compatible strings handled by this driver. */
static const struct of_device_id rt_timer_match[] = {
	{ .compatible = "ralink,rt2880-timer" },
	{},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
/*
 * Built-in platform driver; suppress_bind_attrs prevents manual
 * unbind/bind via sysfs (there is no remove path).
 */
static struct platform_driver rt_timer_driver = {
	.probe = rt_timer_probe,
	.driver = {
		.name = "rt-timer",
		.of_match_table = rt_timer_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(rt_timer_driver);