// SPDX-License-Identifier: GPL-2.0-only
/*
 * Time related functions for Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/module.h>

#include <asm/hexagon_vm.h>

#define TIMER_ENABLE		BIT(0)

/*
 * For the clocksource we need:
 *	pcycle frequency (600MHz)
 * For the loops_per_jiffy we need:
 *	thread/cpu frequency (100MHz)
 * And for the timer, we need:
 *	sleep clock rate
 */

cycles_t	pcycle_freq_mhz;
cycles_t	thread_freq_mhz;
cycles_t	sleep_clk_freq;

/*
 * 8x50 HDD Specs 5-8.  Simulator co-sim not fixed until
 * release 1.1, and then it's "adjustable" and probably not defaulted.
 */
#define RTOS_TIMER_INT		3
#define RTOS_TIMER_REGS_ADDR	0xAB000000UL

static struct resource rtos_timer_resources[] = {
	{
		.start	= RTOS_TIMER_REGS_ADDR,
		.end	= RTOS_TIMER_REGS_ADDR + PAGE_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device rtos_timer_device = {
	.name		= "rtos_timer",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtos_timer_resources),
	.resource	= rtos_timer_resources,
};

/* A lot of this stuff should move into a platform specific section. */
struct adsp_hw_timer_struct {
	u32 match;	/* Match value */
	u32 count;
	u32 enable;	/* [1] - CLR_ON_MATCH_EN, [0] - EN */
	u32 clear;	/* one-shot register that clears the count */
};

/* Look for "TCX0" for related constants. */
static __iomem struct adsp_hw_timer_struct *rtos_timer;

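/*
 * Read the free-running cycle counter via the __vmgettime() Hexagon VM
 * call; this backs the "pcycles" clocksource registered below.
 */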
static u64 timer_get_cycles(struct clocksource *cs)
{
	return (u64) __vmgettime();
}

static struct clocksource hexagon_clocksource = {
	.name		= "pcycles",
	.rating		= 250,
	.read		= timer_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

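/*
 * Program a one-shot event: pulse the clear register to reset the count,
 * load the requested delta into the match register, then enable the timer.
 */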
static int set_next_event(unsigned long delta, struct clock_event_device *evt)
{
	/* Assuming the timer will be disabled when we enter here. */

	iowrite32(1, &rtos_timer->clear);
	iowrite32(0, &rtos_timer->clear);

	iowrite32(delta, &rtos_timer->match);
	iowrite32(TIMER_ENABLE, &rtos_timer->enable);
	return 0;
}

#ifdef CONFIG_SMP
/* Broadcast mechanism */
static void broadcast(const struct cpumask *mask)
{
	send_ipi(mask, IPI_TIMER);
}
#endif

/* XXX Implement set_state_shutdown() */
static struct clock_event_device hexagon_clockevent_dev = {
	.name		= "clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 400,
	.irq		= RTOS_TIMER_INT,
	.set_next_event	= set_next_event,
#ifdef CONFIG_SMP
	.broadcast	= broadcast,
#endif
};

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct clock_event_device, clock_events);

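/*
 * Register a dummy clockevent device for this CPU so that broadcast
 * timer ticks delivered by IPI (see ipi_timer() below) have a per-cpu
 * device to handle them.
 */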
void setup_percpu_clockdev(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
	struct clock_event_device *dummy_clock_dev =
		&per_cpu(clock_events, cpu);

	memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
	INIT_LIST_HEAD(&dummy_clock_dev->list);

	dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
	dummy_clock_dev->cpumask = cpumask_of(cpu);

	clockevents_register_device(dummy_clock_dev);
}

/* Called from smp.c for each CPU's timer ipi call */
void ipi_timer(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);

	ce_dev->event_handler(ce_dev);
}
#endif /* CONFIG_SMP */

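/*
 * Timer interrupt handler: the timer is programmed one-shot, so disable
 * it before handing the event to the clockevent framework.
 */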
static irqreturn_t timer_interrupt(int irq, void *devid)
{
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;

	iowrite32(0, &rtos_timer->enable);
	ce_dev->event_handler(ce_dev);

	return IRQ_HANDLED;
}

/*
 * time_init_deferred - called by start_kernel to set up timer/clock source
 *
 * Install the IRQ handler for the clock and set up the timers.
 * This is done late so that ioremap() can be used.
 *
 * This runs just before the delay loop is calibrated and is
 * used for delay calibration.
 */
void __init time_init_deferred(void)
{
	struct resource *resource = NULL;
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
	unsigned long flag = IRQF_TIMER | IRQF_TRIGGER_RISING;

	ce_dev->cpumask = cpu_all_mask;

	if (!resource)
		resource = rtos_timer_device.resource;

	/* ioremap here means this has to run later, after paging init */
	rtos_timer = ioremap(resource->start, resource_size(resource));

	if (!rtos_timer) {
		release_mem_region(resource->start, resource_size(resource));
	}
	clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);

	/* Note: the sim generic RTOS clock is apparently really 18750Hz */

	/*
	 * Last arg is some guaranteed seconds for which the conversion will
	 * work without overflow.
	 */
	clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);

	ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
	ce_dev->max_delta_ticks = 0x7fffffff;
	ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
	ce_dev->min_delta_ticks = 0xf;

#ifdef CONFIG_SMP
	setup_percpu_clockdev();
#endif

	clockevents_register_device(ce_dev);
	if (request_irq(ce_dev->irq, timer_interrupt, flag, "rtos_timer", NULL))
		pr_err("Failed to register rtos_timer interrupt\n");
}

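/*
 * Defer the real setup to late_time_init, by which point ioremap() is
 * available for mapping the timer registers.
 */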
void __init time_init(void)
{
	late_time_init = time_init_deferred;
}

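/* Busy-wait for the given number of cycles, as measured by __vmgettime(). */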
void __delay(unsigned long cycles)
{
	unsigned long long start = __vmgettime();

	while ((__vmgettime() - start) < cycles)
		cpu_relax();
}
EXPORT_SYMBOL(__delay);

/*
 * This could become parametric or perhaps even computed at run-time,
 * but for now we take the observed simulator jitter.
 */
static long long fudgefactor = 350;	/* Maybe lower if kernel optimized. */

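/*
 * Busy-wait for roughly @usecs microseconds, converted to cycles via
 * pcycle_freq_mhz and shortened by the fudge factor above.
 */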
void __udelay(unsigned long usecs)
{
	unsigned long long start = __vmgettime();
	unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;

	while ((__vmgettime() - start) < finish)
		cpu_relax(); /* not sure how this improves readability */
}
EXPORT_SYMBOL(__udelay);