// SPDX-License-Identifier: GPL-2.0-only
/*
 * Delay loops based on the OpenRISC implementation.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timex.h>

/*
 * Default to the loop-based delay implementation.
 */
struct arm_delay_ops arm_delay_ops __ro_after_init = {
	.delay = __loop_delay,
	.const_udelay = __loop_const_udelay,
	.udelay = __loop_udelay,
};
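/*
 * Note: on ARM the generic __delay()/__const_udelay()/__udelay() helpers are
 * expected to be thin wrappers around these hooks (see <asm/delay.h>), so
 * retargeting the members at boot switches every udelay()/mdelay() caller in
 * the kernel without touching the callers themselves.
 */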

static const struct delay_timer *delay_timer;
static bool delay_calibrated;
static u64 delay_res;

int read_current_timer(unsigned long *timer_val)
{
	if (!delay_timer)
		return -ENXIO;

	*timer_val = delay_timer->read_current_timer();
	return 0;
}
EXPORT_SYMBOL_GPL(read_current_timer);

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
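/*
 * Rough worked example of the mult/shift conversion above (illustrative
 * figures, not taken from real hardware): mult / 2^shift approximates
 * NSEC_PER_SEC / freq, so for a 24 MHz timer one cycle converts to about
 * 1e9 / 24e6 ~= 41 ns and easily passes the "res > 1000" cut-off in
 * register_current_timer_delay() below, whereas a 500 kHz timer would give
 * ~2000 ns per tick and be rejected as too coarse.
 */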

static void __timer_delay(unsigned long cycles)
{
	cycles_t start = get_cycles();

	while ((get_cycles() - start) < cycles)
		cpu_relax();
}

static void __timer_const_udelay(unsigned long xloops)
{
	unsigned long long loops = xloops;
	loops *= arm_delay_ops.ticks_per_jiffy;
	__timer_delay(loops >> UDELAY_SHIFT);
}

static void __timer_udelay(unsigned long usecs)
{
	__timer_const_udelay(usecs * UDELAY_MULT);
}
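/*
 * Back-of-the-envelope check of the fixed-point maths above. UDELAY_MULT and
 * UDELAY_SHIFT come from <asm/delay.h>; the values quoted here are an
 * assumption based on that header (roughly 2^31 * HZ / 10^6 and 31):
 *
 *	cycles = usecs * UDELAY_MULT * ticks_per_jiffy >> UDELAY_SHIFT
 *	      ~= usecs * (2^31 * HZ / 10^6) * (freq / HZ) / 2^31
 *	       = usecs * freq / 10^6
 *
 * e.g. udelay(10) on a 24 MHz delay timer spins for roughly 240 timer cycles.
 */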

void __init register_current_timer_delay(const struct delay_timer *timer)
{
	u32 new_mult, new_shift;
	u64 res;

	clocks_calc_mult_shift(&new_mult, &new_shift, timer->freq,
			       NSEC_PER_SEC, 3600);
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	if (res > 1000) {
		pr_err("Ignoring delay timer %ps, which has insufficient resolution of %lluns\n",
			timer, res);
		return;
	}

	if (!delay_calibrated && (!delay_res || (res < delay_res))) {
		pr_info("Switching to timer-based delay loop, resolution %lluns\n", res);
		delay_timer = timer;
		lpj_fine = timer->freq / HZ;
		delay_res = res;

		/* cpufreq may scale loops_per_jiffy, so keep a private copy */
		arm_delay_ops.ticks_per_jiffy = lpj_fine;
		arm_delay_ops.delay = __timer_delay;
		arm_delay_ops.const_udelay = __timer_const_udelay;
		arm_delay_ops.udelay = __timer_udelay;
	} else {
		pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
	}
}
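/*
 * Minimal registration sketch (illustrative only: the my_timer_* names and
 * register offset are made up, and struct delay_timer is assumed to carry
 * just a read_current_timer() callback plus a freq field, as declared in
 * <asm/delay.h>):
 *
 *	static unsigned long my_timer_read(void)
 *	{
 *		return readl_relaxed(my_timer_base + MY_TIMER_COUNT);
 *	}
 *
 *	static struct delay_timer my_delay_timer = {
 *		.read_current_timer = my_timer_read,
 *		.freq = 24000000,
 *	};
 *
 *	register_current_timer_delay(&my_delay_timer);
 *
 * The counter must be free-running and monotonically increasing, and drivers
 * typically register it from their timer/clocksource init, before
 * calibrate_delay() runs, so that lpj_fine is used instead of the
 * loop-calibrated value.
 */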

/*
 * Hooks called from the generic calibrate_delay() path: returning a non-zero
 * lpj_fine here skips the loop-based bogomips calibration entirely, and
 * marking delay_calibrated prevents a timer registered later from taking
 * over once calibration has already happened.
 */
unsigned long calibrate_delay_is_known(void)
{
	delay_calibrated = true;
	return lpj_fine;
}

void calibration_delay_done(void)
{
	delay_calibrated = true;
}