// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * All RISC-V systems have a timer attached to every hart. These timers can
 * either be read through the "time" and "timeh" CSRs, with the SBI used to
 * set up events, or be accessed directly through MMIO registers.
 */
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
#include <asm/timex.h>

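/*
 * Program the next clock event: re-enable the timer interrupt and ask the
 * SBI to fire when the free-running counter reaches now + delta.
 */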
static int riscv_clock_next_event(unsigned long delta,
				  struct clock_event_device *ce)
{
	csr_set(CSR_IE, IE_TIE);
	sbi_set_timer(get_cycles64() + delta);
	return 0;
}

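/*
 * One clock event device per hart. The timer is one-shot only, hence
 * CLOCK_EVT_FEAT_ONESHOT and no set_state callbacks.
 */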
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name		= "riscv_timer_clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 100,
	.set_next_event	= riscv_clock_next_event,
};

/*
 * It is guaranteed that all the timers across all the harts are synchronized
 * within one tick of each other, so while this could technically go
 * backwards when hopping between CPUs, practically it won't happen.
 */
static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
{
	return get_cycles64();
}

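/*
 * The scheduler clock reads the same counter; notrace keeps ftrace from
 * recursing into it.
 */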
static u64 notrace riscv_sched_clock(void)
{
	return get_cycles64();
}

static struct clocksource riscv_clocksource = {
	.name		= "riscv_clocksource",
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= riscv_clocksource_rdtime,
};

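/*
 * CPU hotplug "starting" callback: register this hart's clock event device
 * and unmask its per-cpu timer interrupt.
 */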
static int riscv_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);

	ce->cpumask = cpumask_of(cpu);
	ce->irq = riscv_clock_event_irq;
	clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);

	enable_percpu_irq(riscv_clock_event_irq,
			  irq_get_trigger_type(riscv_clock_event_irq));
	return 0;
}

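/* CPU hotplug "dying" callback: mask the timer interrupt on this hart. */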
static int riscv_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(riscv_clock_event_irq);
	return 0;
}

/* called directly from the low-level interrupt handler */
static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

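	/*
	 * There is no SBI call to clear the pending timer condition, so
	 * mask the interrupt instead; riscv_clock_next_event() sets TIE
	 * again when the next event is programmed.
	 */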
	csr_clear(CSR_IE, IE_TIE);
	evdev->event_handler(evdev);

	return IRQ_HANDLED;
}

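/*
 * Probe from the device tree: runs once per CPU node; the instance matching
 * the boot CPU registers the clocksource, the sched_clock, the per-cpu timer
 * interrupt handler and the CPU hotplug callbacks.
 */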
static int __init riscv_timer_init_dt(struct device_node *n)
{
	int cpuid, hartid, error;
	struct device_node *child;
	struct irq_domain *domain;

	hartid = riscv_of_processor_hartid(n);
	if (hartid < 0) {
		pr_warn("Invalid hartid for node [%pOF] error = [%d]\n",
			n, hartid);
		return hartid;
	}

	cpuid = riscv_hartid_to_cpuid(hartid);
	if (cpuid < 0) {
		pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
		return cpuid;
	}

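	/*
	 * This init function is called for each CPU's timer node; only the
	 * instance running on the boot CPU does the global registration.
	 */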
	if (cpuid != smp_processor_id())
		return 0;

	child = of_get_compatible_child(n, "riscv,cpu-intc");
	if (!child) {
		pr_err("Failed to find INTC node [%pOF]\n", n);
		return -ENODEV;
	}
	domain = irq_find_host(child);
	of_node_put(child);
	if (!domain) {
		pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
		return -ENODEV;
	}

	riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
	if (!riscv_clock_event_irq) {
		pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
		return -ENODEV;
	}
	pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
		__func__, cpuid, hartid);
	error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
	if (error) {
		pr_err("RISC-V timer registration failed [%d] for cpu = [%d]\n",
		       error, cpuid);
		return error;
	}

	sched_clock_register(riscv_sched_clock, 64, riscv_timebase);

	error = request_percpu_irq(riscv_clock_event_irq,
				   riscv_timer_interrupt,
				   "riscv-timer", &riscv_clock_event);
	if (error) {
		pr_err("registering percpu irq failed [%d]\n", error);
		return error;
	}

	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
				  "clockevents/riscv/timer:starting",
				  riscv_timer_starting_cpu, riscv_timer_dying_cpu);
	if (error)
		pr_err("cpu hp setup state failed for RISC-V timer [%d]\n",
		       error);
	return error;
}

TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);