// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (so never use sleep
 * on SMP; nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
};
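
/*
 * Note (illustrative, not from the original source): a clocksource
 * rating of 400 puts the timebase in the "perfect, must be used" band
 * of the clocksource core, and CLOCKSOURCE_MASK(64) reflects the full
 * 64-bit width of the TB register. The mult/shift pair is derived by
 * the core itself when the source is registered against
 * tb_ticks_per_sec.
 */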

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);
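
/*
 * Note (illustrative): CLOCK_EVT_FEAT_C3STOP tells the clockevents
 * core that this event device may stop ticking in deep idle states,
 * so the tick-broadcast machinery (see timer_broadcast_interrupt()
 * below) has to take over wakeups while a CPU sits in such a state.
 */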

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
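
/*
 * Worked example (illustrative): an "xsec" is 1/XSEC_PER_SEC, i.e.
 * 1/2^20 of a second. SCALE_XSEC(xsec, max) rescales xsec units to
 * units of 1/max of a second, so SCALE_XSEC(XSEC_PER_SEC, 1000000000)
 * is one full second expressed in nanoseconds. The 32-bit variant
 * pre-shifts by 12 so that mulhwu's 32x32 -> high-32 multiply gives
 * ((xsec << 12) * max) >> 32 == ((xsec) * max) / 2^20.
 */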

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

bool tb_invalid;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as a 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
}
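
/*
 * Sketch of how that factor is used (illustrative): div128_by_32()
 * above computes (1000000 << 64) / tb_ticks_per_sec, so the factor f
 * is a 0.64 fixed-point value and a tick count converts with
 * usecs = mulhdu(ticks, f), i.e. (ticks * f) >> 64. For example,
 * with tb_ticks_per_sec = 512000000, 512 ticks come out as 1 usec.
 */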

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

#include <asm/dtl.h>

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
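
/*
 * Note on the walk above (illustrative): dtl_ridx is a free-running
 * 64-bit read index while the log itself holds only N_DISPATCH_LOG
 * entries, so (i % N_DISPATCH_LOG) locates the slot, and comparing
 * against vpa->dtl_idx (the hypervisor's free-running write index)
 * detects both "caught up" and "fallen more than one lap behind".
 */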

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace accumulate_stolen_time(void)
{
	u64 sst, ust;
	unsigned long save_irq_soft_mask = irq_soft_mask_return();
	struct cpu_accounting_data *acct = &local_paca->accounting;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain
	 */
	irq_soft_mask_set(IRQS_DISABLED);

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;

	irq_soft_mask_set(save_irq_soft_mask);
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
					unsigned long now, unsigned long stime)
{
	unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	unsigned long nowscaled, deltascaled;
	unsigned long utime, utime_scaled;

	nowscaled = read_spurr(now);
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;
	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}
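
/*
 * Numeric example for the apportioning above (illustrative): if the
 * timebase shows stime = 300 and utime = 100 ticks but the SPURR only
 * advanced by deltascaled = 200 (the CPU ran at half speed), then
 * stime_scaled = 200 * 300 / 400 = 150 and utime_scaled = 50, which
 * preserves the 3:1 system-to-user ratio seen on the timebase.
 */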

static unsigned long vtime_delta(struct task_struct *tsk,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	*steal_time = calculate_stolen_time(now);

	return stime;
}

void vtime_account_kernel(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);

	stime -= min(stime, steal_time);
	acct->steal_time += steal_time;

	if ((tsk->flags & PF_VCPU) && !irq_count()) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		if (hardirq_count())
			acct->hardirq_time += stime;
		else if (in_serving_softirq())
			acct->softirq_time += stime;
		else
			acct->stime += stime;

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}

static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}

/*
 * Account the whole cputime accumulated in the paca.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;

	spin_begin();
	if (tb_invalid) {
		/*
		 * TB is in error state and isn't ticking anymore.
		 * HMI handler was unable to recover from TB error.
		 * Return immediately, so that kernel won't get stuck here.
		 */
		spin_cpu_relax();
	} else {
		start = mftb();
		while (mftb() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
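
/*
 * Usage sketch (illustrative): udelay(100) busy-waits roughly 100
 * microseconds by spinning on the timebase, e.g. 100 * 512 = 51200
 * TB ticks on a machine with tb_ticks_per_usec = 512. Unlike
 * msleep(), this never schedules, so it is usable with interrupts
 * disabled.
 */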

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		     : "=r" (x)
		     : "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		     "r" (1),
		     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		     "r" (0),
		     "i" (offsetof(struct paca_struct, irq_work_pending)));
}
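
/*
 * Note (illustrative): the "13" in the asm above is GPR r13, which
 * 64-bit PPC kernels dedicate to the current CPU's paca_struct
 * pointer, so these helpers read/write the flag byte with a single
 * load or store and stay safe in any context.
 */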

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	/*
	 * 64-bit code that uses irq soft-mask can just cause an immediate
	 * interrupt here that gets soft masked, if this is called under
	 * local_irq_disable(). It might be possible to prevent that happening
	 * by noticing interrupts are disabled and setting decrementer pending
	 * to be replayed when irqs are enabled. The problem there is that
	 * tracing can call irq_work_raise, including in code that does low
	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
	 * which could get tangled up if we're messing with the same state
	 * here.
	 */
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}
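
/*
 * Design note (illustrative): set_dec(1) forces a decrementer
 * exception almost immediately; timer_interrupt() then observes
 * test_irq_work_pending() and runs the queued work via
 * irq_work_run(). The flag is raised before set_dec(1), so the
 * interrupt cannot race in and find it clear.
 */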

#else /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/*
	 * Some implementations of hotplug will get timer interrupts while
	 * offline; just ignore these.
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		set_dec(decrementer_max);
		return;
	}

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions. When the
	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
	 * 31 bits, which is about 4 seconds on most systems, which gives
	 * the watchdog a chance of catching timer interrupt hard lockups.
	 */
	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
		set_dec(0x7fffffff);
	else
		set_dec(decrementer_max);

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();
	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);
	irq_exit();
	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
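
/*
 * Note (illustrative): writing ~(u64)0 into decrementers_next_tb
 * marks "no event programmed"; the event handler re-arms the timer
 * through decrementer_set_next_event(), which stores the real next
 * expiry back into this per-cpu variable.
 */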

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = ~(u64)0;
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);
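
/*
 * Worked example (illustrative): tb_to_ns_scale and tb_to_ns_shift
 * are chosen at boot so that (mulhdu(ticks, scale)) << shift
 * approximates ticks * 1e9 / ppc_tb_freq. At a 512 MHz timebase one
 * tick is 1.953125 ns, so tb_to_ns(512) comes out as roughly 1000.
 */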

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}


#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a
 * virtualised kernel.
 * Uses the VTB register if available, otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch in host
	 * timebase into the VTB when it takes a guest off the CPU; reading
	 * the VTB would result in reading the 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked,
	 * so it would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is the next best approximation without a VTB.
	 * On a host running bare metal there should never be any stolen
	 * time, and on a host which doesn't do any virtualisation TB
	 * *should* equal VTB, so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock64(struct timespec64 now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

	return ppc_md.set_rtc_time(&tm);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) static void __read_persistent_clock(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct rtc_time tm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static int first = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ts->tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* XXX this is a litle fragile but will work okay in the short term */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (ppc_md.time_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) timezone_offset = ppc_md.time_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* get_boot_time() isn't guaranteed to be safe to call late */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (ppc_md.get_boot_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (!ppc_md.get_rtc_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) ts->tv_sec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ppc_md.get_rtc_time(&tm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ts->tv_sec = rtc_tm_to_time64(&tm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) void read_persistent_clock64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) __read_persistent_clock(ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* Sanitize it in case the real time clock is set before the Epoch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (ts->tv_sec < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ts->tv_sec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ts->tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /* clocksource code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static notrace u64 timebase_read(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return (u64)get_tb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
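
/*
 * clocksource_timebase itself is defined earlier in this file. As an
 * editorial illustration (field values are representative, not quoted
 * from the original), a continuous 64-bit clocksource built on this
 * read hook would look like:
 *
 *	static struct clocksource clocksource_timebase = {
 *		.name	= "timebase",
 *		.rating	= 400,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *		.mask	= CLOCKSOURCE_MASK(64),	// full 64-bit counter
 *		.read	= timebase_read,
 *	};
 *
 * clocksource_register_hz() in clocksource_init() below then derives
 * the mult/shift pair from tb_ticks_per_sec.
 */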
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) void update_vsyscall(struct timekeeper *tk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct timespec64 xt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct clocksource *clock = tk->tkr_mono.clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) u32 mult = tk->tkr_mono.mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u32 shift = tk->tkr_mono.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) u64 cycle_last = tk->tkr_mono.cycle_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) u64 new_tb_to_xs, new_stamp_xsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u64 frac_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (clock != &clocksource_timebase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) xt.tv_sec = tk->xtime_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* Make userspace gettimeofday spin until we're done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ++vdso_data->tb_update_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * This computes ((2^20 / 1e9) * mult) >> shift as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * 0.64 fixed-point fraction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * The computation in the else clause below won't overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * (as long as the timebase frequency is >= 1.049 MHz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * but loses precision because we lose the low bits of the constant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * in the shift. Note that 19342813113834067 ~= 2^(20+64) / 1e9.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * For a shift of 24 the error is about 0.5e-9, or about 0.5ns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * over a second. (Shift values are usually 22, 23 or 24.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * For high frequency clocks such as the 512MHz timebase clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * on POWER[6789], the mult value is small (e.g. 32768000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * and so we can shift the constant by 16 initially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * remaining shifts after the multiplication, which gives a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * more accurate result (e.g. with mult = 32768000, shift = 24,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * the error is only about 1.2e-12, or 0.7ns over 10 minutes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (mult <= 62500000 && clock->shift >= 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
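
/*
 * Worked example (editorial): with the POWER9 values cited above,
 * mult = 32768000 and clock->shift = 24, the first branch computes
 *
 *	(32768000 * 295147905179) >> 8 ~= 3.778e16,
 *
 * which matches 2^64 * (2^20 / 512e6), i.e. xsec per timebase tick as
 * a 0.64 fixed-point fraction.
 */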
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * Compute the fractional second in units of 2^-32 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * it in units of 2^-32 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * We assume shift <= 32 because clocks_calc_mult_shift()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * generates shift values in the range 0 - 32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) do_div(frac_sec, NSEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * Work out new stamp_xsec value for any legacy users of systemcfg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * stamp_xsec is in units of 2^-20 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) new_stamp_xsec = frac_sec >> 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * tb_update_count is used to allow the userspace gettimeofday code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * to assure itself that it sees a consistent view of the tb_to_xs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * stamp_xsec variables. It reads the tb_update_count, then reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * the two values of tb_update_count match and are even, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * tb_to_xs and stamp_xsec values are consistent. If not, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * loops back and reads them again until this criterion is met.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) vdso_data->tb_orig_stamp = cycle_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) vdso_data->stamp_xsec = new_stamp_xsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) vdso_data->tb_to_xs = new_tb_to_xs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) vdso_data->stamp_xtime_sec = xt.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) vdso_data->stamp_xtime_nsec = xt.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) vdso_data->stamp_sec_fraction = frac_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) vdso_data->hrtimer_res = hrtimer_resolution;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ++(vdso_data->tb_update_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
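
/*
 * Editorial sketch of the reader side described above (illustrative
 * only; the real loop lives in the powerpc vDSO):
 *
 *	u32 seq;
 *	u64 xs, stamp;
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();		// count before payload
 *		xs = vdso_data->tb_to_xs;
 *		stamp = vdso_data->stamp_xsec;
 *		smp_rmb();		// payload before recheck
 *	} while ((seq & 1) || seq != vdso_data->tb_update_count);
 *
 * An odd count means an update is in flight; a changed count means the
 * payload may have been torn, so the reader retries.
 */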
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) void update_vsyscall_tz(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) vdso_data->tz_dsttime = sys_tz.tz_dsttime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static void __init clocksource_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct clocksource *clock = &clocksource_timebase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) printk(KERN_ERR "clocksource: %s is already registered\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) clock->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) clock->name, clock->mult, clock->shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static int decrementer_set_next_event(unsigned long evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct clock_event_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) __this_cpu_write(decrementers_next_tb, get_tb() + evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) set_dec(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* We may have raced with new irq work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (test_irq_work_pending())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) set_dec(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
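
/*
 * Editorial note: the clockevents core hands this callback a delta
 * already converted to timebase ticks, roughly
 *
 *	evt = (u64)delta_ns * dec->mult >> dec->shift;
 *
 * e.g. with ppc_tb_freq = 512 MHz, a 1 ms timer needs ~512000 ticks.
 */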
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static int decrementer_shutdown(struct clock_event_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) decrementer_set_next_event(decrementer_max, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static void register_decrementer_clockevent(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct clock_event_device *dec = &per_cpu(decrementers, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) *dec = decrementer_clockevent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dec->cpumask = cpumask_of(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dec->name, dec->mult, dec->shift, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Set values for KVM, see kvm_emulate_dec() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) decrementer_clockevent.mult = dec->mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) decrementer_clockevent.shift = dec->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void enable_large_decrementer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (!cpu_has_feature(CPU_FTR_ARCH_300))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * If we're running as the hypervisor, we need to enable the LD manually;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * otherwise firmware should have done it for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (cpu_has_feature(CPU_FTR_HVMODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static void __init set_decrementer_max(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct device_node *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) u32 bits = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* Prior to ISAv3 the decrementer is always 32-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!cpu_has_feature(CPU_FTR_ARCH_300))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) cpu = of_find_node_by_type(NULL, "cpu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (bits > 64 || bits < 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) bits = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* calculate the signed maximum given this many bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) decrementer_max = (1ul << (bits - 1)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) of_node_put(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) pr_info("time_init: %u bit decrementer (max: %llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) bits, decrementer_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
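
/*
 * Worked example (editorial): a device tree reporting ibm,dec-bits = 56
 * (as on POWER9) yields
 *
 *	decrementer_max = (1ul << 55) - 1 = 0x7fffffffffffff,
 *
 * the largest non-negative value of a 56-bit signed decrementer.
 */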
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static void __init init_decrementer_clockevent(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) register_decrementer_clockevent(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) void secondary_cpu_time_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* Enable and test the large decrementer for this cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) enable_large_decrementer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* Start the decrementer on CPUs that have manual control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * such as BookE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) start_cpu_decrementer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* FIXME: Should make unrelated change to move snapshot_timebase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * call here! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) register_decrementer_clockevent(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* This function is only called on the boot processor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) void __init time_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct div_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) u64 scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) unsigned int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* Normal PowerPC with timebase register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ppc_md.calibrate_decr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) tb_ticks_per_jiffy = ppc_tb_freq / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) tb_ticks_per_sec = ppc_tb_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) tb_ticks_per_usec = ppc_tb_freq / 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) calc_cputime_factors();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * Compute scale factor for sched_clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * The calibrate_decr() function has set tb_ticks_per_sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * which is the timebase frequency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * the 128-bit result as a 64.64 fixed-point number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * We then shift that number right until it is less than 1.0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * giving us the scale factor and shift count to use in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * sched_clock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) scale = res.result_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) for (shift = 0; res.result_high != 0; ++shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) scale = (scale >> 1) | (res.result_high << 63);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) res.result_high >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) tb_to_ns_scale = scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) tb_to_ns_shift = shift;
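
/*
 * Editorial worked example: sched_clock() (earlier in this file)
 * consumes these roughly as ns = mulhdu(ticks, tb_to_ns_scale) <<
 * tb_to_ns_shift. At tb_ticks_per_sec = 512 MHz, 1e9 * 2^64 / 512e6
 * = 1.953125 * 2^64 overflows the 0.64 format once, so the loop above
 * leaves scale ~= 0.9765625 * 2^64 and shift = 1 (one tick = 1.953125 ns).
 */
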
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) boot_tb = get_tb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /* If platform provided a timezone (pmac), we correct the time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (timezone_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) sys_tz.tz_minuteswest = -timezone_offset / 60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) sys_tz.tz_dsttime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) vdso_data->tb_update_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /* initialise and enable the large decrementer (if we have one) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) set_decrementer_max();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) enable_large_decrementer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* Start the decrementer on CPUs that have manual control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * such as BookE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) start_cpu_decrementer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Register the clocksource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) clocksource_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) init_decrementer_clockevent();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) tick_setup_hrtimer_broadcast();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) of_clk_init(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) enable_sched_clock_irqtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) void div128_by_32(u64 dividend_high, u64 dividend_low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) unsigned int divisor, struct div_result *dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) unsigned long a, b, c, d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) unsigned long w, x, y, z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) u64 ra, rb, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) a = dividend_high >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) b = dividend_high & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) c = dividend_low >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) d = dividend_low & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) w = a / divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ra = ((u64)(a - (w * divisor)) << 32) + b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) rb = ((u64) do_div(ra, divisor) << 32) + c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) x = ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) rc = ((u64) do_div(rb, divisor) << 32) + d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) y = rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) do_div(rc, divisor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) z = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dr->result_high = ((u64)w << 32) + x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) dr->result_low = ((u64)y << 32) + z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
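
/*
 * Usage example (editorial): time_init() above divides 1e9 * 2^64 by
 * the timebase frequency. For divisor = 512000000:
 *
 *	struct div_result res;
 *
 *	div128_by_32(1000000000, 0, 512000000, &res);
 *	// 1e9 / 512e6 = 1.953125 as a 64.64 fixed-point number:
 *	// res.result_high == 1
 *	// res.result_low  == 0xf400000000000000  (0.953125 * 2^64)
 *
 * The routine is plain base-2^32 long division: a, b, c, d are the four
 * 32-bit digits of the dividend, w, x, y, z those of the quotient.
 */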
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* We don't need to calibrate delay; we use the CPU timebase for that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) void calibrate_delay(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* Some generic code (such as spinlock debug) uses loops_per_jiffy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * as the number of __delay(1) calls in a jiffy, so make it so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) loops_per_jiffy = tb_ticks_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
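
/*
 * Worked example (editorial): with ppc_tb_freq = 512 MHz and HZ = 100,
 * loops_per_jiffy = 512000000 / 100 = 5120000, i.e. __delay(5120000)
 * spins for one jiffy's worth of timebase ticks.
 */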
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ppc_md.get_rtc_time(tm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (!ppc_md.set_rtc_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (ppc_md.set_rtc_time(tm) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static const struct rtc_class_ops rtc_generic_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) .read_time = rtc_generic_get_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) .set_time = rtc_generic_set_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) };
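
/*
 * Editorial sketch: the rtc-generic platform driver is expected to pick
 * these ops out of the platform data registered below, along the lines
 * of (illustrative, not quoted from that driver):
 *
 *	static int gen_rtc_probe(struct platform_device *pdev)
 *	{
 *		const struct rtc_class_ops *ops = dev_get_platdata(&pdev->dev);
 *		struct rtc_device *rtc;
 *
 *		rtc = devm_rtc_device_register(&pdev->dev, "rtc-generic",
 *					       ops, THIS_MODULE);
 *		return PTR_ERR_OR_ZERO(rtc);
 *	}
 */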
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static int __init rtc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct platform_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!ppc_md.get_rtc_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) pdev = platform_device_register_data(NULL, "rtc-generic", -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) &rtc_generic_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) sizeof(rtc_generic_ops));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return PTR_ERR_OR_ZERO(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) device_initcall(rtc_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) #endif