// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>

#define TIMERn_CTRL				0x00
#define TIMERn_CTRL_PRESC(val)			(((val) & 0xf) << 24)
#define TIMERn_CTRL_PRESC_1024			TIMERn_CTRL_PRESC(10)
#define TIMERn_CTRL_CLKSEL(val)			(((val) & 0x3) << 16)
#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK	TIMERn_CTRL_CLKSEL(0)
#define TIMERn_CTRL_OSMEN			0x00000010
#define TIMERn_CTRL_MODE(val)			(((val) & 0x3) << 0)
#define TIMERn_CTRL_MODE_UP			TIMERn_CTRL_MODE(0)
#define TIMERn_CTRL_MODE_DOWN			TIMERn_CTRL_MODE(1)

#define TIMERn_CMD				0x04
#define TIMERn_CMD_START			0x00000001
#define TIMERn_CMD_STOP				0x00000002

#define TIMERn_IEN				0x0c
#define TIMERn_IF				0x10
#define TIMERn_IFS				0x14
#define TIMERn_IFC				0x18
#define TIMERn_IRQ_UF				0x00000002

#define TIMERn_TOP				0x1c
#define TIMERn_CNT				0x24

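/**
 * struct efm32_clock_event_ddata - clockevent driver data
 * @evtdev:		clock_event_device registered with the clockevents core
 * @base:		ioremapped base of the TIMERn register block
 * @periodic_top:	TOP value used in periodic mode, i.e. one jiffy worth
 *			of ticks at the prescaled (timer clock / 1024) rate
 */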
struct efm32_clock_event_ddata {
	struct clock_event_device evtdev;
	void __iomem *base;
	unsigned periodic_top;
};

static int efm32_clock_event_shutdown(struct clock_event_device *evtdev)
{
	struct efm32_clock_event_ddata *ddata =
		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);

	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
	return 0;
}

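/*
 * One-shot mode: configure the timer as a down-counter that stops itself
 * after a single underflow (OSMEN). The counter stays stopped here; it is
 * loaded and started by set_next_event().
 */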
static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev)
{
	struct efm32_clock_event_ddata *ddata =
		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);

	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
	writel_relaxed(TIMERn_CTRL_PRESC_1024 |
		       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
		       TIMERn_CTRL_OSMEN |
		       TIMERn_CTRL_MODE_DOWN,
		       ddata->base + TIMERn_CTRL);
	return 0;
}

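/*
 * Periodic mode: the counter repeatedly counts down from TOP (one jiffy
 * worth of prescaled ticks) and raises an underflow interrupt on every wrap.
 */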
static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev)
{
	struct efm32_clock_event_ddata *ddata =
		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);

	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
	writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
	writel_relaxed(TIMERn_CTRL_PRESC_1024 |
		       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
		       TIMERn_CTRL_MODE_DOWN,
		       ddata->base + TIMERn_CTRL);
	writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
	return 0;
}

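/*
 * Load the requested number of ticks into CNT and (re)start the down-counter;
 * the underflow interrupt fires once the count reaches zero.
 */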
static int efm32_clock_event_set_next_event(unsigned long evt,
					    struct clock_event_device *evtdev)
{
	struct efm32_clock_event_ddata *ddata =
		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);

	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
	writel_relaxed(evt, ddata->base + TIMERn_CNT);
	writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);

	return 0;
}

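/* Acknowledge the underflow interrupt and hand off to the clockevent core. */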
static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
{
	struct efm32_clock_event_ddata *ddata = dev_id;

	writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC);

	ddata->evtdev.event_handler(&ddata->evtdev);

	return IRQ_HANDLED;
}

static struct efm32_clock_event_ddata clock_event_ddata = {
	.evtdev = {
		.name = "efm32 clockevent",
		.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
		.set_state_shutdown = efm32_clock_event_shutdown,
		.set_state_periodic = efm32_clock_event_set_periodic,
		.set_state_oneshot = efm32_clock_event_set_oneshot,
		.set_next_event = efm32_clock_event_set_next_event,
		.rating = 200,
	},
};

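/*
 * Set up a TIMERn instance as a free-running 16-bit up-counter clocked at
 * (timer clock / 1024) and register its CNT register as an mmio clocksource.
 */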
static int __init efm32_clocksource_init(struct device_node *np)
{
	struct clk *clk;
	void __iomem *base;
	unsigned long rate;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		pr_err("failed to get clock for clocksource (%d)\n", ret);
		goto err_clk_get;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("failed to enable timer clock for clocksource (%d)\n",
		       ret);
		goto err_clk_enable;
	}
	rate = clk_get_rate(clk);

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map registers for clocksource\n");
		goto err_iomap;
	}

	writel_relaxed(TIMERn_CTRL_PRESC_1024 |
		       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
		       TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL);
	writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD);

	ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer",
				    DIV_ROUND_CLOSEST(rate, 1024), 200, 16,
				    clocksource_mmio_readl_up);
	if (ret) {
		pr_err("failed to init clocksource (%d)\n", ret);
		goto err_clocksource_init;
	}

	return 0;

err_clocksource_init:

	iounmap(base);
err_iomap:

	clk_disable_unprepare(clk);
err_clk_enable:

	clk_put(clk);
err_clk_get:

	return ret;
}

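/*
 * Set up a TIMERn instance as the system clockevent: enable the underflow
 * interrupt, register the clock_event_device and hook up the timer irq.
 */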
static int __init efm32_clockevent_init(struct device_node *np)
{
	struct clk *clk;
	void __iomem *base;
	unsigned long rate;
	int irq;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		pr_err("failed to get clock for clockevent (%d)\n", ret);
		goto err_clk_get;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("failed to enable timer clock for clockevent (%d)\n",
		       ret);
		goto err_clk_enable;
	}
	rate = clk_get_rate(clk);

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map registers for clockevent\n");
		goto err_iomap;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		ret = -ENOENT;
		pr_err("failed to get irq for clockevent\n");
		goto err_get_irq;
	}

	writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN);

	clock_event_ddata.base = base;
	clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);

	clockevents_config_and_register(&clock_event_ddata.evtdev,
					DIV_ROUND_CLOSEST(rate, 1024),
					0xf, 0xffff);

	ret = request_irq(irq, efm32_clock_event_handler, IRQF_TIMER,
			  "efm32 clockevent", &clock_event_ddata);
	if (ret) {
		pr_err("failed to setup irq (%d)\n", ret);
		goto err_setup_irq;
	}

	return 0;

err_setup_irq:
err_get_irq:

	iounmap(base);
err_iomap:

	clk_disable_unprepare(clk);
err_clk_enable:

	clk_put(clk);
err_clk_get:

	return ret;
}

/*
 * This init callback may be invoked for several matching timer nodes. The
 * static flags below make sure we end up with exactly one clocksource and
 * one clock_event_device: the first node that initialises successfully
 * becomes the clocksource, the next one the clockevent.
 */
static int __init efm32_timer_init(struct device_node *np)
{
	static int has_clocksource, has_clockevent;
	int ret = 0;

	if (!has_clocksource) {
		ret = efm32_clocksource_init(np);
		if (!ret) {
			has_clocksource = 1;
			return 0;
		}
	}

	if (!has_clockevent) {
		ret = efm32_clockevent_init(np);
		if (!ret) {
			has_clockevent = 1;
			return 0;
		}
	}

	return ret;
}
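
/*
 * Both compatibles below bind to efm32_timer_init(). Purely as an
 * illustration (address, register size, irq number and clock phandle are
 * made up, not taken from a real dts), a matching node needs reg,
 * interrupts and clocks entries at index 0 and could look like:
 *
 *	timer@40010000 {
 *		compatible = "energymicro,efm32-timer";
 *		reg = <0x40010000 0x400>;
 *		interrupts = <2>;
 *		clocks = <&cmu_timer_clk>;
 *	};
 */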
TIMER_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
TIMER_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);