// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 * - Two channels combine to create a free-running 32-bit counter
 *   with a base rate of 5+ MHz, packaged as a clocksource (with
 *   resolution better than 200 nsec).
 * - Some chips have a 32-bit counter. A single channel provides this
 *   32-bit free-running counter; the second channel is not used.
 *
 * - The third channel may be used to provide a clockevent source, used in
 *   either periodic or oneshot mode. For a 16-bit counter it runs at 32 KiHz
 *   and can handle delays of up to two seconds. For 32-bit counters it runs
 *   at the same rate as the clocksource.
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power. Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */

static void __iomem *tcaddr;
static struct
{
        u32 cmr;
        u32 imr;
        u32 rc;
        bool clken;
} tcb_cache[3];
static u32 bmr_cache;

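/*
 * Peripheral-clock divisors selectable through the CMR TCCLKS field; the
 * array index is what gets programmed into CMR. On blocks with a generic
 * clock input (has_gclk), index 0 selects that input rather than a divided
 * peripheral clock, so the divisor search in tcb_clksrc_init() skips it.
 */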
static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };

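/*
 * 16-bit TC blocks chain two channels: channel 0 holds the low half and
 * channel 1 the high half of the 32-bit count. Read high, then low, then
 * re-read high; if the high half changed, the low half wrapped in between
 * and the sample is retried.
 */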
static u64 tc_get_cycles(struct clocksource *cs)
{
        unsigned long flags;
        u32 lower, upper;

        raw_local_irq_save(flags);
        do {
                upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
                lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
        } while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));

        raw_local_irq_restore(flags);
        return (upper << 16) | lower;
}

static u64 tc_get_cycles32(struct clocksource *cs)
{
        return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}

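/*
 * Suspend/resume: cache each channel's CMR, IMR and RC plus its clock-enable
 * state (SR CLKSTA) and the block mode register, then reprogram and retrigger
 * the block on resume in case the sleep state lost the TCB context.
 */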
static void tc_clksrc_suspend(struct clocksource *cs)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
                tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
                tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
                tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
                tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
                                        ATMEL_TC_CLKSTA);
        }

        bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}

static void tc_clksrc_resume(struct clocksource *cs)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
                /* Restore registers for the channel, RA and RB are not used */
                writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
                writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
                writel(0, tcaddr + ATMEL_TC_REG(i, RA));
                writel(0, tcaddr + ATMEL_TC_REG(i, RB));
                /* Disable all the interrupts */
                writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
                /* Reenable interrupts that were enabled before suspending */
                writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
                /* Start the clock if it was used */
                if (tcb_cache[i].clken)
                        writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
        }

        /* Dual channel, chain channels */
        writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
        /* Finally, trigger all the channels */
        writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct clocksource clksrc = {
        .rating = 200,
        .read = tc_get_cycles,
        .mask = CLOCKSOURCE_MASK(32),
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
        .suspend = tc_clksrc_suspend,
        .resume = tc_clksrc_resume,
};

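/*
 * sched_clock() and the ARM delay timer reuse the clocksource counter; the
 * 16- or 32-bit variant is picked and registered in tcb_clksrc_init().
 */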
static u64 notrace tc_sched_clock_read(void)
{
        return tc_get_cycles(&clksrc);
}

static u64 notrace tc_sched_clock_read32(void)
{
        return tc_get_cycles32(&clksrc);
}

static struct delay_timer tc_delay_timer;

static unsigned long tc_delay_timer_read(void)
{
        return tc_get_cycles(&clksrc);
}

static unsigned long notrace tc_delay_timer_read32(void)
{
        return tc_get_cycles32(&clksrc);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS

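/*
 * Channel 2 drives the clockevent; bundle the clock_event_device with the
 * clock, rate and register base it needs.
 */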
struct tc_clkevt_device {
        struct clock_event_device clkevt;
        struct clk *clk;
        u32 rate;
        void __iomem *regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
        return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

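/* TCCLKS value programmed into channel 2's CMR, chosen in setup_clkevents() */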
static u32 timer_clock;

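/*
 * Mask channel 2's interrupts and stop its counter; only disable the
 * peripheral clock if the clockevent actually enabled it (i.e. it is not
 * in the detached state).
 */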
static int tc_shutdown(struct clock_event_device *d)
{
        struct tc_clkevt_device *tcd = to_tc_clkevt(d);
        void __iomem *regs = tcd->regs;

        writel(0xff, regs + ATMEL_TC_REG(2, IDR));
        writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
        if (!clockevent_state_detached(d))
                clk_disable(tcd->clk);

        return 0;
}

static int tc_set_oneshot(struct clock_event_device *d)
{
        struct tc_clkevt_device *tcd = to_tc_clkevt(d);
        void __iomem *regs = tcd->regs;

        if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
                tc_shutdown(d);

        clk_enable(tcd->clk);

        /* count up to RC, then irq and stop */
        writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
               ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
        writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

        /* set_next_event() configures and starts the timer */
        return 0;
}

static int tc_set_periodic(struct clock_event_device *d)
{
        struct tc_clkevt_device *tcd = to_tc_clkevt(d);
        void __iomem *regs = tcd->regs;

        if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
                tc_shutdown(d);

        /* By not making the gentime core emulate periodic mode on top
         * of oneshot, we get lower overhead and improved accuracy.
         */
        clk_enable(tcd->clk);

        /* count up to RC, then irq and restart */
        writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
               regs + ATMEL_TC_REG(2, CMR));
        writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

        /* Enable clock and interrupts on RC compare */
        writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

        /* go go gadget! */
        writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
               ATMEL_TC_REG(2, CCR));
        return 0;
}

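/*
 * Oneshot programming: load the RC compare with the requested delta, then
 * CLKEN plus a software trigger restarts the counter from zero so the CPCS
 * interrupt fires after 'delta' ticks.
 */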
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
        writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));

        /* go go gadget! */
        writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
                       tcaddr + ATMEL_TC_REG(2, CCR));
        return 0;
}

static struct tc_clkevt_device clkevt = {
        .clkevt = {
                .features = CLOCK_EVT_FEAT_PERIODIC |
                            CLOCK_EVT_FEAT_ONESHOT,
                /* Should be lower than at91rm9200's system timer */
                .rating = 125,
                .set_next_event = tc_next_event,
                .set_state_shutdown = tc_shutdown,
                .set_state_periodic = tc_set_periodic,
                .set_state_oneshot = tc_set_oneshot,
        },
};

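/*
 * Reading SR clears the pending channel 2 flags; only an RC compare is
 * treated as our event.
 */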
static irqreturn_t ch2_irq(int irq, void *handle)
{
        struct tc_clkevt_device *dev = handle;
        unsigned int sr;

        sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
        if (sr & ATMEL_TC_CPCS) {
                dev->clkevt.event_handler(&dev->clkevt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

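/*
 * Clockevent setup on channel 2: 32-bit counters reuse the clocksource
 * divisor, 16-bit ones run from the 32 KiHz slow clock; then request the
 * channel interrupt and register the device.
 */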
static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
        int ret;
        struct clk *t2_clk = tc->clk[2];
        int irq = tc->irq[2];
        int bits = tc->tcb_config->counter_width;

        /* try to enable t2 clk to avoid future errors in mode change */
        ret = clk_prepare_enable(t2_clk);
        if (ret)
                return ret;

        clkevt.regs = tc->regs;
        clkevt.clk = t2_clk;

        if (bits == 32) {
                timer_clock = divisor_idx;
                clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
        } else {
                ret = clk_prepare_enable(tc->slow_clk);
                if (ret) {
                        clk_disable_unprepare(t2_clk);
                        return ret;
                }

                clkevt.rate = clk_get_rate(tc->slow_clk);
                timer_clock = ATMEL_TC_TIMER_CLOCK5;
        }

        clk_disable(t2_clk);

        clkevt.clkevt.cpumask = cpumask_of(0);

        ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
        if (ret) {
                clk_unprepare(t2_clk);
                if (bits != 32)
                        clk_disable_unprepare(tc->slow_clk);
                return ret;
        }

        clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);

        return ret;
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
        /* NOTHING */
        return 0;
}

#endif

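/*
 * 16-bit blocks: channel 0 free-runs on the divided peripheral clock and
 * toggles TIOA0 via the RA/RC compares; BMR routes TIOA0 to XC1 so channel 1
 * counts once per 64K ticks of channel 0, forming the high half read by
 * tc_get_cycles().
 */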
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
        /* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
        writel(mck_divisor_idx                  /* likely divide-by-8 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP   /* free-run */
                        | ATMEL_TC_ACPA_SET     /* TIOA0 rises at 0 */
                        | ATMEL_TC_ACPC_CLEAR,  /* (duty cycle 50%) */
                        tcaddr + ATMEL_TC_REG(0, CMR));
        writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
        writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
        writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));    /* no irqs */
        writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

        /* channel 1: waveform mode, input TIOA0 */
        writel(ATMEL_TC_XC1                     /* input: TIOA0 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP,  /* free-run */
                        tcaddr + ATMEL_TC_REG(1, CMR));
        writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));    /* no irqs */
        writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

        /* chain channel 0 to channel 1 */
        writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
        /* then reset all the timers */
        writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
        /* channel 0: waveform mode, input mclk/8 */
        writel(mck_divisor_idx                  /* likely divide-by-8 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP,  /* free-run */
                        tcaddr + ATMEL_TC_REG(0, CMR));
        writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));    /* no irqs */
        writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

        /* then reset all the timers */
        writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct atmel_tcb_config tcb_rm9200_config = {
        .counter_width = 16,
};

static struct atmel_tcb_config tcb_sam9x5_config = {
        .counter_width = 32,
};

static struct atmel_tcb_config tcb_sama5d2_config = {
        .counter_width = 32,
        .has_gclk = 1,
};

static const struct of_device_id atmel_tcb_of_match[] = {
        { .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
        { .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
        { .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
        { /* sentinel */ }
};

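/*
 * Probe path: map the parent TCB, grab the per-channel clocks and the
 * channel 2 interrupt, pick the largest divisor that keeps the counter at
 * 5+ MHz, then register the clocksource, the clockevent, sched_clock()
 * and the delay timer.
 */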
static int __init tcb_clksrc_init(struct device_node *node)
{
        struct atmel_tc tc;
        struct clk *t0_clk;
        const struct of_device_id *match;
        u64 (*tc_sched_clock)(void);
        u32 rate, divided_rate = 0;
        int best_divisor_idx = -1;
        int bits;
        int i;
        int ret;

        /* Protect against multiple calls */
        if (tcaddr)
                return 0;

        tc.regs = of_iomap(node->parent, 0);
        if (!tc.regs)
                return -ENXIO;

        t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
        if (IS_ERR(t0_clk))
                return PTR_ERR(t0_clk);

        tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
        if (IS_ERR(tc.slow_clk))
                return PTR_ERR(tc.slow_clk);

        tc.clk[0] = t0_clk;
        tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
        if (IS_ERR(tc.clk[1]))
                tc.clk[1] = t0_clk;
        tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
        if (IS_ERR(tc.clk[2]))
                tc.clk[2] = t0_clk;

        tc.irq[2] = of_irq_get(node->parent, 2);
        if (tc.irq[2] <= 0) {
                tc.irq[2] = of_irq_get(node->parent, 0);
                if (tc.irq[2] <= 0)
                        return -EINVAL;
        }

        match = of_match_node(atmel_tcb_of_match, node->parent);
        if (!match)
                return -ENODEV;

        tc.tcb_config = match->data;
        bits = tc.tcb_config->counter_width;

        for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
                writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));

        ret = clk_prepare_enable(t0_clk);
        if (ret) {
                pr_debug("can't enable T0 clk\n");
                return ret;
        }

        /* How fast will we be counting? Pick something over 5 MHz. */
        rate = (u32) clk_get_rate(t0_clk);
        i = 0;
        if (tc.tcb_config->has_gclk)
                i = 1;
        for (; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
                unsigned divisor = atmel_tcb_divisors[i];
                unsigned tmp;

                tmp = rate / divisor;
                pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
                if ((best_divisor_idx >= 0) && (tmp < 5 * 1000 * 1000))
                        break;
                divided_rate = tmp;
                best_divisor_idx = i;
        }

        clksrc.name = kbasename(node->parent->full_name);
        clkevt.clkevt.name = kbasename(node->parent->full_name);
        pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
                 ((divided_rate % 1000000) + 500) / 1000);

        tcaddr = tc.regs;

        if (bits == 32) {
                /* use appropriate function to read 32-bit counter */
                clksrc.read = tc_get_cycles32;
                /* setup only channel 0 */
                tcb_setup_single_chan(&tc, best_divisor_idx);
                tc_sched_clock = tc_sched_clock_read32;
                tc_delay_timer.read_current_timer = tc_delay_timer_read32;
        } else {
                /* we have three clocks no matter what the
                 * underlying platform supports.
                 */
                ret = clk_prepare_enable(tc.clk[1]);
                if (ret) {
                        pr_debug("can't enable T1 clk\n");
                        goto err_disable_t0;
                }
                /* setup both channel 0 & 1 */
                tcb_setup_dual_chan(&tc, best_divisor_idx);
                tc_sched_clock = tc_sched_clock_read;
                tc_delay_timer.read_current_timer = tc_delay_timer_read;
        }

        /* and away we go! */
        ret = clocksource_register_hz(&clksrc, divided_rate);
        if (ret)
                goto err_disable_t1;

        /* channel 2: periodic and oneshot timer support */
        ret = setup_clkevents(&tc, best_divisor_idx);
        if (ret)
                goto err_unregister_clksrc;

        sched_clock_register(tc_sched_clock, 32, divided_rate);

        tc_delay_timer.freq = divided_rate;
        register_current_timer_delay(&tc_delay_timer);

        return 0;

err_unregister_clksrc:
        clocksource_unregister(&clksrc);

err_disable_t1:
        if (bits != 32)
                clk_disable_unprepare(tc.clk[1]);

err_disable_t0:
        clk_disable_unprepare(t0_clk);

        tcaddr = NULL;

        return ret;
}
TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);