/*
 * arch/xtensa/kernel/time.c
 *
 * Timer and clock support.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>

#include <asm/timex.h>
#include <asm/platform.h>

unsigned long ccount_freq;	/* ccount Hz */
EXPORT_SYMBOL(ccount_freq);

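/* Clocksource read callback: return the current CCOUNT cycle counter. */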
static u64 ccount_read(struct clocksource *cs)
{
	return (u64)get_ccount();
}

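/*
 * sched_clock() backend; marked notrace because it may be called from
 * the tracing code itself.
 */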
static u64 notrace ccount_sched_clock_read(void)
{
	return get_ccount();
}

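/* The free-running 32-bit CCOUNT register exposed as a clocksource. */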
static struct clocksource ccount_clocksource = {
	.name = "ccount",
	.rating = 200,
	.read = ccount_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

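/*
 * Per-CPU clockevent state: irq_enabled mirrors whether the timer IRQ is
 * currently enabled so that enable_irq()/disable_irq() stay balanced.
 */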
struct ccount_timer {
	struct clock_event_device evt;
	int irq_enabled;
	char name[24];
};

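/*
 * Program the Linux timer CCOMPARE register to fire "delta" cycles from
 * now. Return -ETIME if the deadline had already passed by the time the
 * compare value was written.
 */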
static int ccount_timer_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	unsigned long flags, next;
	int ret = 0;

	local_irq_save(flags);
	next = get_ccount() + delta;
	set_linux_timer(next);
	if (next - get_ccount() > delta)
		ret = -ETIME;
	local_irq_restore(flags);

	return ret;
}

/*
 * There is no way to disable the timer interrupt at the device level,
 * only by clearing its bit in the INTENABLE special register. Since
 * enable_irq()/disable_irq() calls are nested, we need to make sure that
 * these calls are balanced, which is what the irq_enabled flag tracks.
 */
static int ccount_timer_shutdown(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (timer->irq_enabled) {
		disable_irq_nosync(evt->irq);
		timer->irq_enabled = 0;
	}
	return 0;
}

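/* Re-enable the timer IRQ when entering oneshot mode (also used on tick resume). */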
static int ccount_timer_set_oneshot(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (!timer->irq_enabled) {
		enable_irq(evt->irq);
		timer->irq_enabled = 1;
	}
	return 0;
}

static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = {
	.evt = {
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 300,
		.set_next_event = ccount_timer_set_next_event,
		.set_state_shutdown = ccount_timer_shutdown,
		.set_state_oneshot = ccount_timer_set_oneshot,
		.tick_resume = ccount_timer_set_oneshot,
	},
};

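/*
 * Timer IRQ handler: rewriting CCOMPARE (even with its current value)
 * clears the pending timer interrupt, then the clockevent handler runs.
 */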
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;

	set_linux_timer(get_linux_timer());
	evt->event_handler(evt);

	/* Allow the platform to do something useful (e.g. kick a watchdog). */
	platform_heartbeat();

	return IRQ_HANDLED;
}

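/*
 * Set up and register the per-CPU clockevent device; called for the boot
 * CPU from time_init() and for secondary CPUs when they come online.
 */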
void local_timer_setup(unsigned cpu)
{
	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
	struct clock_event_device *clockevent = &timer->evt;

	timer->irq_enabled = 1;
	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
	clockevent->name = timer->name;
	clockevent->cpumask = cpumask_of(cpu);
	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
	if (WARN(!clockevent->irq, "error: can't map timer irq"))
		return;
	clockevents_config_and_register(clockevent, ccount_freq,
					0xf, 0xffffffff);
}

#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
#ifdef CONFIG_OF
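/*
 * With a device tree, take the ccount frequency from the CPU node's input
 * clock; fall back to the platform calibration hook otherwise.
 */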
static void __init calibrate_ccount(void)
{
	struct device_node *cpu;
	struct clk *clk;

	cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
	if (cpu) {
		clk = of_clk_get(cpu, 0);
		if (!IS_ERR(clk)) {
			ccount_freq = clk_get_rate(clk);
			return;
		} else {
			pr_warn("%s: CPU input clock not found\n",
				__func__);
		}
	} else {
		pr_warn("%s: CPU node not found in the device tree\n",
			__func__);
	}

	platform_calibrate_ccount();
}
#else
static inline void calibrate_ccount(void)
{
	platform_calibrate_ccount();
}
#endif
#endif

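/*
 * Early boot: determine the ccount frequency, register the clocksource,
 * sched_clock and the boot CPU clockevent, and request the timer IRQ.
 */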
void __init time_init(void)
{
	int irq;

	of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	pr_info("Calibrating CPU frequency ");
	calibrate_ccount();
	pr_cont("%d.%02d MHz\n",
		(int)ccount_freq / 1000000,
		(int)(ccount_freq / 10000) % 100);
#else
	ccount_freq = CONFIG_XTENSA_CPU_CLOCK * 1000000UL;
#endif
	WARN(!ccount_freq,
	     "%s: CPU clock frequency is not set up correctly\n",
	     __func__);
	clocksource_register_hz(&ccount_clocksource, ccount_freq);
	local_timer_setup(0);
	irq = this_cpu_ptr(&ccount_timer)->evt.irq;
	if (request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL))
		pr_err("Failed to request irq %d (timer)\n", irq);
	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
	timer_probe();
}

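/*
 * With the ccount frequency already known there is no need to measure
 * loops_per_jiffy; derive it directly and skip the delay-loop calibration.
 */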
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void calibrate_delay(void)
{
	loops_per_jiffy = ccount_freq / HZ;
	pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
		loops_per_jiffy / (1000000 / HZ),
		(loops_per_jiffy / (10000 / HZ)) % 100);
}
#endif