// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel timekeeping code and accessor functions. Based on code from
 *  timer.c, moved in commit 8524070b7982.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>
#include <linux/audit.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

enum timekeeping_adv_mode {
	/* Update timekeeper when a tick has passed */
	TK_ADV_TICK,

	/* Update timekeeper on a direct frequency change */
	TK_ADV_FREQ
};

DEFINE_RAW_SPINLOCK(timekeeper_lock);

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_raw_spinlock_t	seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned = {
	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
};

static struct timekeeper shadow_timekeeper;

/* Flag for whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_latch_t	seq;
	struct tk_read_base	base[2];
};

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
	if (timekeeping_suspended)
		return cycles_at_suspend;
	return local_clock();
}

static struct clocksource dummy_clock = {
	.read = dummy_clock_read,
};

/*
 * Boot time initialization which allows local_clock() to be utilized
 * during early boot when clocksources are not available. local_clock()
 * returns nanoseconds already so no conversion is required, hence mult=1
 * and shift=0. When the first proper clocksource is installed then
 * the fast time keepers are updated with the correct values.
 */
#define FAST_TK_INIT						\
	{							\
		.clock		= &dummy_clock,			\
		.mask		= CLOCKSOURCE_MASK(64),		\
		.mult		= 1,				\
		.shift		= 0,				\
	}

static struct tk_fast tk_fast_mono ____cacheline_aligned = {
	.seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
	.base[0] = FAST_TK_INIT,
	.base[1] = FAST_TK_INIT,
};

static struct tk_fast tk_fast_raw ____cacheline_aligned = {
	.seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
	.base[0] = FAST_TK_INIT,
	.base[1] = FAST_TK_INIT,
};

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
		tk->raw_sec++;
	}
}

static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}
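
/*
 * Illustrative sketch (not used by the core code): the canonical pattern
 * readers elsewhere in this file use against tk_core.seq. The
 * tk_example_* name is hypothetical and exists only to demonstrate the
 * loop: retry until a snapshot was read without a concurrent update.
 */
static inline __maybe_unused struct timespec64 tk_example_read_xtime(void)
{
	struct timespec64 ts;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts = tk_xtime(&tk_core.timekeeper);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ts;
}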

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
	/*
	 * Timespec representation for VDSO update to avoid 64bit division
	 * on every update.
	 */
	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
}
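
/*
 * Note: offs_boot accumulates the total time spent in suspend; boottime
 * readouts are then derived as CLOCK_MONOTONIC plus this offset, as done
 * below in ktime_get_boot_fast_ns().
 */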

/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * This helper must be used in the read paths because, while the
 * seqcount ensures we don't return a bad value while structures are
 * updated, it doesn't protect against potential crashes. There is the
 * possibility that the tkr's clocksource may change between the read
 * reference and the clock reference passed to the read function. This
 * can cause crashes if the wrong clocksource is passed to the wrong
 * read function.
 * It isn't required when holding the timekeeper_lock or doing a read of
 * the fast-timekeeper tkrs (which is protected by its own locking and
 * update logic).
 */
static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
	struct clocksource *clock = READ_ONCE(tkr->clock);

	return clock->read(clock);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqcount, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqcount here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(tkr);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values: e.g. with a 32-bit mask, a small
	 * backwards step of -5 cycles reads as 0xfffffffb, so its
	 * complement (~delta & mask) is tiny and falls below mask >> 3.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
	u64 cycle_now, delta;

	/* read clocksource */
	cycle_now = tk_clock_read(tkr);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = interval * clock->mult;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;

		if (shift_change < 0) {
			tk->tkr_mono.xtime_nsec >>= -shift_change;
			tk->tkr_raw.xtime_nsec >>= -shift_change;
		} else {
			tk->tkr_mono.xtime_nsec <<= shift_change;
			tk->tkr_raw.xtime_nsec <<= shift_change;
		}
	}

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
	tk->skip_second_overflow = 0;
}
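
/*
 * Illustrative sketch (hypothetical helper, not called by this file): the
 * rounded ns -> cycles conversion tk_setup_internals() performs above with
 * the clock's mult/shift pair, i.e.
 *	cycles = ((ns << shift) + mult / 2) / mult
 * with a minimum of one cycle so the interval can never be zero.
 */
static inline __maybe_unused u64 tk_example_ns_to_cycles(u64 ns, u32 mult, u32 shift)
{
	u64 tmp = ns << shift;

	tmp += mult / 2;	/* round to nearest rather than down */
	do_div(tmp, mult);

	return tmp ? tmp : 1;
}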

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}

static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
{
	u64 delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
{
	u64 delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}
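
/*
 * Worked example for the mult/shift arithmetic above, with assumed
 * illustrative numbers: a 24 MHz clocksource with shift = 8 gets
 * mult = (NSEC_PER_SEC << 8) / 24000000 ~= 10667, so one cycle scales to
 * 10667 >> 8 ~= 41.67 ns. A delta of 24000000 cycles then yields
 * (24000000 * 10667) >> 8 ~= 1000031250 ns, i.e. roughly one second; the
 * small residual is what the NTP adjustment of mult corrects over time.
 */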

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(const struct tk_read_base *tkr,
				   struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_latch_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqcounts. This has the following minor side effects:
 *
 * (1) It's possible for a timestamp to be taken after the boot offset is
 * updated but before the timekeeper is updated. If this happens, the new
 * boot offset is added to the old timekeeping, making the clock appear to
 * update slightly earlier:
 *	CPU 0					CPU 1
 *	timekeeping_inject_sleeptime64()
 *	__timekeeping_inject_sleeptime(tk, delta);
 *						timestamp();
 *	timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated. Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/*
 * See comment for __ktime_get_fast_ns() vs. timestamp ordering
 */
static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
{
	struct tk_read_base *tkr;
	u64 basem, baser, delta;
	unsigned int seq;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		basem = ktime_to_ns(tkr->base);
		baser = ktime_to_ns(tkr->base_real);

		delta = timekeeping_delta_to_ns(tkr,
				clocksource_delta(tk_clock_read(tkr),
				tkr->cycle_last, tkr->mask));
	} while (read_seqcount_latch_retry(&tkf->seq, seq));

	if (mono)
		*mono = basem + delta;
	return baser + delta;
}

/**
 * ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
 */
u64 ktime_get_real_fast_ns(void)
{
	return __ktime_get_real_fast(&tk_fast_mono, NULL);
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);

/**
 * ktime_get_fast_timestamps - NMI safe timestamps
 * @snapshot:	Pointer to timestamp storage
 *
 * Stores clock monotonic, boottime and realtime timestamps.
 *
 * Boot time is a racy access on 32bit systems if the sleep time injection
 * happens late during resume and not in timekeeping_resume(). That could
 * be avoided by expanding struct tk_read_base with boot offset for 32bit
 * and adding more overhead to the update. As this is a hard to observe
 * once per resume event which can be filtered with reasonable effort using
 * the accurate mono/real timestamps, it's probably not worth the trouble.
 *
 * Aside from that it might be possible on 32 and 64 bit to observe the
 * following when the sleep time injection happens late:
 *
 *	CPU 0					CPU 1
 *	timekeeping_resume()
 *						ktime_get_fast_timestamps()
 *						mono, real = __ktime_get_real_fast()
 *	inject_sleep_time()
 *	update boot offset
 *						boot = mono + bootoffset;
 *
 * That means that boot time already has the sleep time adjustment, but
 * real time does not. On the next readout both are in sync again.
 *
 * Preventing this for 64bit is not really feasible without destroying the
 * careful cache layout of the timekeeper because the sequence count and
 * struct tk_read_base would then need two cache lines instead of one.
 *
 * Access to the time keeper clock source is disabled across the innermost
 * steps of suspend/resume. The accessors still work, but the timestamps
 * are frozen until time keeping is resumed, which happens very early.
 *
 * For regular suspend/resume there is no observable difference vs. sched
 * clock, but it might affect some of the nasty low level debug printks.
 *
 * OTOH, access to sched clock is not guaranteed across suspend/resume on
 * all systems either, so it depends on the hardware in use.
 *
 * If that turns out to be a real problem then this could be mitigated by
 * using sched clock in a similar way as during early boot. But it's not as
 * trivial as on early boot because it needs some careful protection
 * against the clock monotonic timestamp jumping backwards on resume.
 */
void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
	snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
}
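
/*
 * Illustrative sketch of a caller (hypothetical, e.g. from a tracer or a
 * low level debug printk path); the tk_example_* name is not part of the
 * kernel API:
 */
static inline __maybe_unused void tk_example_fast_snapshot(void)
{
	struct ktime_timestamps snap;

	ktime_get_fast_timestamps(&snap);
	/* snap.mono, snap.boot and snap.real now hold one coherent readout */
}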

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended. It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(const struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	const struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tk_clock_read(tkr);
	tkr_dummy.clock = &dummy_clock;
	tkr_dummy.base_real = tkr->base + tk->offs_real;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
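
/*
 * Illustrative sketch of a listener (hypothetical names; real users live
 * in e.g. hypervisor guest time code). The callback is invoked under
 * timekeeper_lock from update_pvclock_gtod(), so it must not sleep:
 */
static int __maybe_unused tk_example_gtod_notify(struct notifier_block *nb,
						 unsigned long was_set, void *priv)
{
	struct timekeeper *tk = priv;	/* the freshly updated timekeeper */

	(void)tk;			/* consume the update here */
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused tk_example_gtod_nb = {
	.notifier_call = tk_example_gtod_notify,
};
/* Registration: pvclock_gtod_register_notifier(&tk_example_gtod_nb); */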

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater than or equal to one second.
	 * Take this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;

	/* Update the monotonic raw base */
	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}
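
/*
 * Worked example for the base_mono identity above, with assumed numbers:
 * if xtime_sec = 1000 and wall_to_monotonic = -900s (i.e. the system has
 * seen 100 seconds of monotonic time), then base_mono = 100 * 1e9 ns, and
 * a ktime reader only adds the cycle delta since the last update to it.
 */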
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) /* must hold timekeeper_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) static void timekeeping_update(struct timekeeper *tk, unsigned int action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (action & TK_CLEAR_NTP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) tk->ntp_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) ntp_clear();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) tk_update_leap_state(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) tk_update_ktime_data(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) update_vsyscall(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (action & TK_CLOCK_WAS_SET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) tk->clock_was_set_seq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * The mirroring of the data to the shadow-timekeeper needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * to happen last here to ensure we don't over-write the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * timekeeper structure on the next update with stale data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (action & TK_MIRROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) memcpy(&shadow_timekeeper, &tk_core.timekeeper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) sizeof(tk_core.timekeeper));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * timekeeping_forward_now - update clock to the current time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * @tk: Pointer to the timekeeper to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * Forward the current clock to update its state since the last call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * update_wall_time(). This is useful before significant clock changes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * as it avoids having to deal with this time offset explicitly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) static void timekeeping_forward_now(struct timekeeper *tk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) u64 cycle_now, delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) cycle_now = tk_clock_read(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) tk->tkr_mono.cycle_last = cycle_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) tk->tkr_raw.cycle_last = cycle_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* If arch requires, add in arch_gettimeoffset() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* If arch requires, add in arch_gettimeoffset() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) tk_normalize_xtime(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * ktime_get_real_ts64 - Returns the time of day in a timespec64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * @ts: pointer to the timespec64 to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * Returns the time of day in a timespec64 (WARN if suspended).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) void ktime_get_real_ts64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) u64 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) WARN_ON(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ts->tv_sec = tk->xtime_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) nsecs = timekeeping_get_ns(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ts->tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) timespec64_add_ns(ts, nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) EXPORT_SYMBOL(ktime_get_real_ts64);
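
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * how a driver might stamp an event with wall-clock time using the
 * accessor above. The function name and the message are hypothetical.
 */
static void __maybe_unused example_stamp_event_walltime(void)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	/* tv_sec is time64_t (s64), tv_nsec is long */
	pr_info("event at %lld.%09ld UTC\n", (long long)ts.tv_sec, ts.tv_nsec);
}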
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ktime_t ktime_get(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ktime_t base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) u64 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) WARN_ON(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) base = tk->tkr_mono.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) nsecs = timekeeping_get_ns(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return ktime_add_ns(base, nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) EXPORT_SYMBOL_GPL(ktime_get);
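
/*
 * Illustrative sketch: measuring an elapsed interval on CLOCK_MONOTONIC
 * with ktime_get(). Editor's example; the function name is hypothetical.
 */
static void __maybe_unused example_measure_duration(void)
{
	ktime_t start = ktime_get();

	/* ... the work to be measured would run here ... */

	pr_info("took %lld ns\n", ktime_to_ns(ktime_sub(ktime_get(), start)));
}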
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u32 ktime_get_resolution_ns(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) u32 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) WARN_ON(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static ktime_t *offsets[TK_OFFS_MAX] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ktime_t ktime_get_with_offset(enum tk_offsets offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ktime_t base, *offset = offsets[offs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u64 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) WARN_ON(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) base = ktime_add(tk->tkr_mono.base, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) nsecs = timekeeping_get_ns(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return ktime_add_ns(base, nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) EXPORT_SYMBOL_GPL(ktime_get_with_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ktime_t base, *offset = offsets[offs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) u64 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) WARN_ON(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) base = ktime_add(tk->tkr_mono.base, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return ktime_add_ns(base, nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * ktime_mono_to_any() - convert monotonic time to any other time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * @tmono: time to convert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @offs: which offset to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ktime_t *offset = offsets[offs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ktime_t tconv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) tconv = ktime_add(tmono, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return tconv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) EXPORT_SYMBOL_GPL(ktime_mono_to_any);
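
/*
 * Illustrative sketch: re-expressing a CLOCK_MONOTONIC timestamp (for
 * example an hrtimer expiry) on the CLOCK_REALTIME axis. Editor's
 * example; the function name is hypothetical.
 */
static ktime_t __maybe_unused example_mono_to_wall(ktime_t mono_stamp)
{
	return ktime_mono_to_any(mono_stamp, TK_OFFS_REAL);
}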
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * ktime_get_raw - Returns the raw monotonic time in ktime_t format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ktime_t ktime_get_raw(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ktime_t base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u64 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) base = tk->tkr_raw.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) nsecs = timekeeping_get_ns(&tk->tkr_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return ktime_add_ns(base, nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) EXPORT_SYMBOL_GPL(ktime_get_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * ktime_get_ts64 - get the monotonic clock in timespec64 format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * @ts: pointer to timespec64 variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * The function calculates the monotonic clock from the realtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * clock and the wall_to_monotonic offset and stores the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * in normalized timespec64 format in the variable pointed to by @ts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) void ktime_get_ts64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct timespec64 tomono;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) u64 nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) WARN_ON(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ts->tv_sec = tk->xtime_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) nsec = timekeeping_get_ns(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) tomono = tk->wall_to_monotonic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ts->tv_sec += tomono.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ts->tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) timespec64_add_ns(ts, nsec + tomono.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) EXPORT_SYMBOL_GPL(ktime_get_ts64);
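
/*
 * Illustrative sketch: the timespec64 and ktime_t readouts above are two
 * views of the same CLOCK_MONOTONIC clock; back-to-back reads differ only
 * by the time elapsed between the two calls. Editor's example with a
 * hypothetical function name.
 */
static void __maybe_unused example_compare_mono_readouts(void)
{
	struct timespec64 ts;
	ktime_t kt;

	ktime_get_ts64(&ts);
	kt = ktime_get();
	pr_info("timespec64 %lld.%09ld vs ktime %lld ns\n",
		(long long)ts.tv_sec, ts.tv_nsec, ktime_to_ns(kt));
}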
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Returns the seconds portion of CLOCK_MONOTONIC with a single non-serialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * read. tk->ktime_sec is of type 'unsigned long' so this works on both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * 32-bit and 64-bit systems. On 32-bit systems the readout covers ~136 years
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * of uptime, which should be enough to prevent premature wraparounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) time64_t ktime_get_seconds(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) WARN_ON(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return tk->ktime_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) EXPORT_SYMBOL_GPL(ktime_get_seconds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Returns the wall clock seconds since 1970. This replaces the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * get_seconds() interface, which is not y2038-safe on 32-bit systems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * On 64-bit systems the fast access to tk->xtime_sec is preserved. On
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * 32-bit systems the access must be protected with the sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * counter to provide "atomic" access to the 64-bit tk->xtime_sec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) time64_t ktime_get_real_seconds(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) time64_t seconds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (IS_ENABLED(CONFIG_64BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return tk->xtime_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) seconds = tk->xtime_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return seconds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
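
/*
 * Illustrative sketch: a y2038-safe replacement for the old
 * get_seconds() pattern. Editor's example; the name is hypothetical.
 */
static void __maybe_unused example_y2038_safe_seconds(void)
{
	time64_t now = ktime_get_real_seconds();

	/* time64_t is 64 bit even on 32-bit systems, so no 2038 overflow */
	pr_info("seconds since the epoch: %lld\n", now);
}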
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * __ktime_get_real_seconds - The same as ktime_get_real_seconds(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * but without the sequence counter protection. This internal function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * is only called when the timekeeping lock is already held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) noinstr time64_t __ktime_get_real_seconds(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return tk->xtime_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * @systime_snapshot: pointer to struct receiving the system time snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ktime_t base_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ktime_t base_real;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) u64 nsec_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) u64 nsec_real;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) u64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) WARN_ON_ONCE(timekeeping_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) now = tk_clock_read(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) base_real = ktime_add(tk->tkr_mono.base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) tk_core.timekeeper.offs_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) base_raw = tk->tkr_raw.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) systime_snapshot->cycles = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) EXPORT_SYMBOL_GPL(ktime_get_snapshot);
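
/*
 * Illustrative sketch: taking a correlated snapshot of the clocksource
 * cycles and the realtime/raw clocks. Editor's example; the function
 * name is hypothetical.
 */
static void __maybe_unused example_take_snapshot(void)
{
	struct system_time_snapshot snap;

	ktime_get_snapshot(&snap);
	pr_info("cycles=%llu real=%lld ns raw=%lld ns\n",
		snap.cycles, ktime_to_ns(snap.real), ktime_to_ns(snap.raw));
}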
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Scale base by mult/div checking for overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) u64 tmp, rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) tmp = div64_u64_rem(*base, div, &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) tmp *= mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) rem = div64_u64(rem * mult, div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) *base = tmp + rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
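
/*
 * Illustrative sketch of the helper above: scaling 1 s of history by a
 * partial/total ratio of 1/4 yields 250 ms, and -EOVERFLOW is returned
 * instead of silently truncating if the multiplication would not fit in
 * 64 bits. Editor's example; the function name is hypothetical.
 */
static void __maybe_unused example_scale_history(void)
{
	u64 delta_ns = NSEC_PER_SEC;

	if (!scale64_check_overflow(1, 4, &delta_ns))
		pr_info("scaled delta: %llu ns\n", delta_ns);	/* 250000000 */
}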
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * @history: Snapshot representing start of history
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * @partial_history_cycles: Cycle offset into history (fractional part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * @total_history_cycles: Total history length in cycles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * @discontinuity: True indicates the clock was set during the history period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * @ts: Cross timestamp that should be adjusted using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * partial/total ratio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * Helper function used by get_device_system_crosststamp() to correct the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * crosstimestamp corresponding to the start of the current interval to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * system counter value (timestamp point) provided by the driver. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * total_history_* quantities are the total history starting at the provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * reference point and ending at the start of the current interval. The cycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * count between the driver timestamp point and the start of the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * interval is partial_history_cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static int adjust_historical_crosststamp(struct system_time_snapshot *history,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) u64 partial_history_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) u64 total_history_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) bool discontinuity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct system_device_crosststamp *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) u64 corr_raw, corr_real;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) bool interp_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (total_history_cycles == 0 || partial_history_cycles == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Interpolate shortest distance from beginning or end of history */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) interp_forward = partial_history_cycles > total_history_cycles / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) partial_history_cycles = interp_forward ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) total_history_cycles - partial_history_cycles :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) partial_history_cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * Scale the monotonic raw time delta by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * partial_history_cycles / total_history_cycles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) corr_raw = (u64)ktime_to_ns(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) ktime_sub(ts->sys_monoraw, history->raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) ret = scale64_check_overflow(partial_history_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) total_history_cycles, &corr_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * If there is a discontinuity in the history, scale monotonic raw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * correction by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * mult(real)/mult(raw) yielding the realtime correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * Otherwise, calculate the realtime correction similar to monotonic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * raw calculation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (discontinuity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) corr_real = mul_u64_u32_div
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) corr_real = (u64)ktime_to_ns(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ktime_sub(ts->sys_realtime, history->real));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) ret = scale64_check_overflow(partial_history_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) total_history_cycles, &corr_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /* Fix up the monotonic raw and real time values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (interp_forward) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ts->sys_realtime = ktime_add_ns(history->real, corr_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * cycle_between - true if test occurs chronologically between before and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * after, also when the cycle counter has wrapped between before and after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static bool cycle_between(u64 before, u64 test, u64 after)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (test > before && test < after)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (test < before && before > after)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * get_device_system_crosststamp - Synchronously capture system/device timestamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * @get_time_fn: Callback to get simultaneous device time and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * system counter from the device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * @ctx: Context passed to get_time_fn()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * @history_begin: Historical reference point used to interpolate system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * time when counter provided by the driver is before the current interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * @xtstamp: Receives simultaneously captured system and device time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * Reads a timestamp from a device and correlates it to system time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int get_device_system_crosststamp(int (*get_time_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) (ktime_t *device_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct system_counterval_t *sys_counterval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) void *ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct system_time_snapshot *history_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct system_device_crosststamp *xtstamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) struct system_counterval_t system_counterval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) u64 cycles, now, interval_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) unsigned int clock_was_set_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ktime_t base_real, base_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) u64 nsec_real, nsec_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) u8 cs_was_changed_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) bool do_interp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * Try to synchronously capture device time and a system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * counter value calling back into the device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * Verify that the clocksource associated with the captured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * system counter value is the same as the currently installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * timekeeper clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (tk->tkr_mono.clock != system_counterval.cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) cycles = system_counterval.cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * Check whether the system counter value provided by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * device driver lies within the current timekeeping interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) now = tk_clock_read(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) interval_start = tk->tkr_mono.cycle_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (!cycle_between(interval_start, cycles, now)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) clock_was_set_seq = tk->clock_was_set_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) cs_was_changed_seq = tk->cs_was_changed_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) cycles = interval_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) do_interp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) do_interp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) base_real = ktime_add(tk->tkr_mono.base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) tk_core.timekeeper.offs_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) base_raw = tk->tkr_raw.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) system_counterval.cycles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) system_counterval.cycles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * Interpolate if necessary, adjusting back from the start of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * current interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (do_interp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) u64 partial_history_cycles, total_history_cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) bool discontinuity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * Check that the counter value occurs after the provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * history reference and that the history doesn't cross a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * clocksource change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (!history_begin ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) !cycle_between(history_begin->cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) system_counterval.cycles, cycles) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) history_begin->cs_was_changed_seq != cs_was_changed_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) partial_history_cycles = cycles - system_counterval.cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) total_history_cycles = cycles - history_begin->cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) discontinuity =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) history_begin->clock_was_set_seq != clock_was_set_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ret = adjust_historical_crosststamp(history_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) partial_history_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) total_history_cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) discontinuity, xtstamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
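
/*
 * Illustrative sketch: the shape of the get_time_fn() callback a
 * PTP-capable driver would pass in. All device specifics here are
 * hypothetical; real hardware must latch the device time and the
 * system counter as close together as it allows.
 */
static int example_get_time_fn(ktime_t *device_time,
			       struct system_counterval_t *sys_counterval,
			       void *ctx)
{
	*device_time = ktime_set(0, 0);	/* would read the device clock */
	sys_counterval->cs = NULL;	/* would be the clocksource the device latched */
	sys_counterval->cycles = 0;	/* would be the latched counter value */
	return 0;
}

static int __maybe_unused example_crosststamp(void)
{
	struct system_device_crosststamp xt;

	/* Fails with -ENODEV here since the sketch reports no clocksource */
	return get_device_system_crosststamp(example_get_time_fn, NULL,
					     NULL, &xt);
}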
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * do_settimeofday64 - Sets the time of day.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * @ts: pointer to the timespec64 variable containing the new time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * Sets the time of day to the new time, updates NTP and notifies hrtimers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) int do_settimeofday64(const struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct timespec64 ts_delta, xt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (!timespec64_valid_settod(ts))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) timekeeping_forward_now(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) xt = tk_xtime(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) ts_delta = timespec64_sub(*ts, xt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) tk_set_xtime(tk, ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* signal hrtimers about time change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) clock_was_set();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) audit_tk_injoffset(ts_delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) EXPORT_SYMBOL(do_settimeofday64);
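
/*
 * Illustrative sketch: setting the wall clock from a value read out of
 * an RTC, similar in spirit to the hctosys machinery. Editor's example;
 * the function name is hypothetical.
 */
static int __maybe_unused example_set_time_from_rtc(time64_t rtc_secs)
{
	struct timespec64 ts = { .tv_sec = rtc_secs, .tv_nsec = 0 };

	/*
	 * The core rejects values that timespec64_valid_settod() refuses,
	 * as well as times that would place the boot time before the epoch.
	 */
	return do_settimeofday64(&ts);
}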
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * timekeeping_inject_offset - Adds or subtracts from the current time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * @ts: pointer to the timespec64 variable containing the offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * Adds or subtracts an offset value from the current time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static int timekeeping_inject_offset(const struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct timespec64 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) timekeeping_forward_now(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /* Make sure the proposed value is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) tmp = timespec64_add(tk_xtime(tk), *ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) !timespec64_valid_settod(&tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) tk_xtime_add(tk, ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) error: /* even if we error out, we forwarded the time, so call update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* signal hrtimers about time change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) clock_was_set();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * Indicates if there is an offset between the system clock and the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * clock/persistent clock/rtc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int persistent_clock_is_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Adjust the time obtained from the CMOS to be UTC time instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * local time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * This is ugly, but preferable to the alternatives. Otherwise we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * would either need to write a program to do it in /etc/rc (and risk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * confusion if the program gets run more than once; it would also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * hard to make the program warp the clock precisely n hours) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * compile in the timezone information into the kernel. Bad, bad....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * - TYT, 1992-01-01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * The best thing to do is to keep the CMOS clock in universal time (UTC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * as real UNIX machines always do it. This avoids all headaches about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * daylight saving times and warping kernel clocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) void timekeeping_warp_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (sys_tz.tz_minuteswest != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct timespec64 adjust;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) persistent_clock_is_local = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) adjust.tv_sec = sys_tz.tz_minuteswest * 60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) adjust.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) timekeeping_inject_offset(&adjust);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * @tk: Pointer to the timekeeper to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * @tai_offset: New TAI offset in seconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) tk->tai_offset = tai_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * change_clocksource - Swaps clocksources if a new one is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * @data: pointer to the new clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * Accumulates the current time interval and initializes the new clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static int change_clocksource(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct clocksource *new, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) new = (struct clocksource *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) timekeeping_forward_now(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * If the clocksource is in a module, get a module reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * Succeeds for built-in code (owner == NULL) as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (try_module_get(new->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (!new->enable || new->enable(new) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) old = tk->tkr_mono.clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) tk_setup_internals(tk, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (old->disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) old->disable(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) module_put(old->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) module_put(new->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * timekeeping_notify - Install a new clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * @clock: pointer to the clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * This function is called from clocksource.c after a new, better clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * source has been registered. The caller holds the clocksource_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) int timekeeping_notify(struct clocksource *clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (tk->tkr_mono.clock == clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) stop_machine(change_clocksource, clock, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) tick_clock_notify();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return tk->tkr_mono.clock == clock ? 0 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
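
/*
 * Illustrative sketch: timekeeping_notify() is reached when a driver
 * registers a better clocksource with the clocksource core, typically
 * via clocksource_register_hz(). All names, the rating and the 1 MHz
 * frequency below are hypothetical.
 */
static u64 example_cs_read(struct clocksource *cs)
{
	return 0;	/* would read a free-running hardware counter */
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 300,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __maybe_unused example_register_cs(void)
{
	return clocksource_register_hz(&example_cs, 1000000);
}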
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * @ts: pointer to the timespec64 to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * Returns the raw monotonic time (completely unmodified by NTP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) void ktime_get_raw_ts64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) u64 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ts->tv_sec = tk->raw_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) nsecs = timekeeping_get_ns(&tk->tkr_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ts->tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) timespec64_add_ns(ts, nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) EXPORT_SYMBOL(ktime_get_raw_ts64);
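
/*
 * Illustrative sketch: the raw clock is never slewed by NTP and ticks
 * from its own epoch, so it drifts apart from CLOCK_MONOTONIC over time.
 * Editor's example; the function name is hypothetical.
 */
static void __maybe_unused example_raw_vs_mono(void)
{
	struct timespec64 raw, mono;

	ktime_get_raw_ts64(&raw);
	ktime_get_ts64(&mono);
	pr_info("raw %lld.%09ld mono %lld.%09ld\n",
		(long long)raw.tv_sec, raw.tv_nsec,
		(long long)mono.tv_sec, mono.tv_nsec);
}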
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) int timekeeping_valid_for_hres(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * timekeeping_max_deferment - Returns max time the clocksource can be deferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) u64 timekeeping_max_deferment(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) u64 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) ret = tk->tkr_mono.clock->max_idle_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /**
 * read_persistent_clock64 - Return time from the persistent clock.
 * @ts:		pointer to the timespec64 to be set
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) void __weak read_persistent_clock64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ts->tv_sec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) ts->tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /**
 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
 *                                        from the boot.
 * @wall_time:	current time as returned by persistent clock
 * @boot_offset:	offset that is defined as wall_time - boot_time
 *
 * Weak dummy function for arches that do not yet support it.
 * The default function calculates offset based on the current value of
 * local_clock(). This way architectures that support sched_clock() but don't
 * have a dedicated boot time clock will provide the best estimate of the
 * boot time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) void __weak __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) struct timespec64 *boot_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) read_persistent_clock64(wall_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) *boot_offset = ns_to_timespec64(local_clock());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
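
/*
 * Sketch of a possible architecture override (illustrative;
 * my_arch_read_rtc() and my_arch_boot_clock_ns() are hypothetical
 * helpers):
 *
 *	void __init read_persistent_wall_and_boot_offset(struct timespec64 *wt,
 *							 struct timespec64 *bo)
 *	{
 *		my_arch_read_rtc(wt);
 *		*bo = ns_to_timespec64(my_arch_boot_clock_ns());
 *	}
 */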
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * Flag reflecting whether timekeeping_resume() has injected sleeptime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) *
 * The flag starts off false and is only set when a suspend reaches
 * timekeeping_suspend(). timekeeping_resume() sets it back to false when
 * the timekeeper clocksource did not stop across suspend and has been
 * used to update the sleep time. If the timekeeper clocksource has
 * stopped then the flag stays true and is used by the RTC resume code
 * to decide whether sleep time must be injected, in which case the flag
 * gets cleared afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * If a suspend fails before reaching timekeeping_resume() then the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * stays false and prevents erroneous sleeptime injection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static bool suspend_timing_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
/* Flag indicating whether there is a persistent clock on this platform */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static bool persistent_clock_exists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * timekeeping_init - Initializes the clocksource and common timekeeping values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) void __init timekeeping_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct timespec64 wall_time, boot_offset, wall_to_mono;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct clocksource *clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (timespec64_valid_settod(&wall_time) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) timespec64_to_ns(&wall_time) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) persistent_clock_exists = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) } else if (timespec64_to_ns(&wall_time) != 0) {
		pr_warn("Persistent clock returned invalid value\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) wall_time = (struct timespec64){0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (timespec64_compare(&wall_time, &boot_offset) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) boot_offset = (struct timespec64){0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /*
	 * We want to set wall_to_mono, so that the following is true:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * wall time + wall_to_mono = boot time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) wall_to_mono = timespec64_sub(boot_offset, wall_time);
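	/*
	 * Worked example: if the persistent clock reports wall_time =
	 * 1000000s and local_clock() says the system has been up for 50s
	 * (boot_offset = 50s), then wall_to_mono = 50s - 1000000s =
	 * -999950s and the monotonic clock starts out reading 50s.
	 */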
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) ntp_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) clock = clocksource_default_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (clock->enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) clock->enable(clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) tk_setup_internals(tk, clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) tk_set_xtime(tk, &wall_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) tk->raw_sec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) tk_set_wall_to_mono(tk, wall_to_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) /* time in seconds when suspend began for persistent clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static struct timespec64 timekeeping_suspend_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) /**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @tk:		pointer to the timekeeper being updated
 * @delta:	pointer to a timespec64 delta value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * Takes a timespec offset measuring a suspend interval and properly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * adds the sleep offset to the timekeeping variables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) const struct timespec64 *delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid sleep delta value!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) tk_xtime_add(tk, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) tk_debug_account_sleep_time(delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/*
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
 * If the system has neither 1) nor 2), 3) will be used finally.
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume()
 * means.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) bool timekeeping_rtc_skipresume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return !suspend_timing_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
/*
 * Whether 1) will be used can only be determined in timekeeping_resume(),
 * which is invoked after rtc_suspend(), so we can't skip rtc_suspend()
 * for sure if the system has 1).
 *
 * But if the system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(), and this is what
 * timekeeping_rtc_skipsuspend() means.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) bool timekeeping_rtc_skipsuspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return persistent_clock_exists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and that also don't have an effective nonstop clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * This function should only be called by rtc_resume(), and allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * a suspend offset to be injected into the timekeeping values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) suspend_timing_needed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) timekeeping_forward_now(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) __timekeeping_inject_sleeptime(tk, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) /* signal hrtimers about time change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) clock_was_set();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
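
/*
 * Illustrative call pattern, loosely modelled on the RTC core's
 * rtc_resume() (old_rtc_time and new_rtc_time are placeholders for the
 * RTC readings taken at suspend and at resume):
 *
 *	struct timespec64 sleep_time;
 *
 *	sleep_time = timespec64_sub(new_rtc_time, old_rtc_time);
 *	if (timespec64_to_ns(&sleep_time) > 0)
 *		timekeeping_inject_sleeptime64(&sleep_time);
 */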
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * timekeeping_resume - Resumes the generic timekeeping subsystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) void timekeeping_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct clocksource *clock = tk->tkr_mono.clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct timespec64 ts_new, ts_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) u64 cycle_now, nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) bool inject_sleeptime = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) read_persistent_clock64(&ts_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) clockevents_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) clocksource_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) /*
	 * After the system resumes, we need to calculate the suspended time
	 * and compensate the OS time for it. There are 3 sources that could
	 * be used: the nonstop clocksource during suspend, the persistent
	 * clock and the RTC device.
	 *
	 * A given platform may have one, two or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * A less preferred source is only tried if there is no better
	 * usable source. The RTC part is handled separately in the RTC
	 * core code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) cycle_now = tk_clock_read(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) nsec = clocksource_stop_suspend_timing(clock, cycle_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (nsec > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ts_delta = ns_to_timespec64(nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) inject_sleeptime = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) inject_sleeptime = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (inject_sleeptime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) suspend_timing_needed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) __timekeeping_inject_sleeptime(tk, &ts_delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /* Re-base the last cycle value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) tk->tkr_mono.cycle_last = cycle_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) tk->tkr_raw.cycle_last = cycle_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) tk->ntp_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) timekeeping_suspended = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) touch_softlockup_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) tick_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) hrtimers_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) int timekeeping_suspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct timespec64 delta, delta_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static struct timespec64 old_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) struct clocksource *curr_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) u64 cycle_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) read_persistent_clock64(&timekeeping_suspend_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) /*
	 * On some systems the persistent_clock cannot be detected in
	 * timekeeping_init() from its return value, so if we see a valid
	 * value returned here, update the persistent_clock_exists flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) persistent_clock_exists = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) suspend_timing_needed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) timekeeping_forward_now(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) timekeeping_suspended = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) /*
	 * Since we've called timekeeping_forward_now(), cycle_last stores the
	 * value just read from the current clocksource. Save this to
	 * potentially use in suspend timing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) curr_clock = tk->tkr_mono.clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) cycle_now = tk->tkr_mono.cycle_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) clocksource_start_suspend_timing(curr_clock, cycle_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (persistent_clock_exists) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /*
		 * To avoid drift caused by repeated suspend/resumes,
		 * each of which can add ~1 second of drift error,
		 * try to compensate so that the difference between system
		 * time and persistent_clock time stays close to constant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) delta_delta = timespec64_sub(delta, old_delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * If delta_delta is too large, assume a time
			 * correction has occurred and set old_delta to
			 * the current delta.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) old_delta = delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) } else {
			/* Otherwise adjust timekeeping_suspend_time to compensate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) timekeeping_suspend_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) timespec64_add(timekeeping_suspend_time, delta_delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
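	/*
	 * Worked example: if old_delta was 0.5s and this suspend sees
	 * delta = 0.8s, then delta_delta = 0.3s. Moving
	 * timekeeping_suspend_time forward by 0.3s shortens the sleep
	 * interval computed at resume by the same amount, pulling the
	 * system/persistent clock difference back towards 0.5s.
	 */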
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) timekeeping_update(tk, TK_MIRROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) halt_fast_timekeeper(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) tick_suspend();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) clocksource_suspend();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) clockevents_suspend();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
/* Syscore resume/suspend bits for timekeeping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static struct syscore_ops timekeeping_syscore_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) .resume = timekeeping_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) .suspend = timekeeping_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) static int __init timekeeping_init_ops(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) register_syscore_ops(&timekeeping_syscore_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) device_initcall(timekeeping_init_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * Apply a multiplier adjustment to the timekeeper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) s64 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) s32 mult_adj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) s64 interval = tk->cycle_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (mult_adj == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) } else if (mult_adj == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) interval = -interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) offset = -offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) } else if (mult_adj != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) interval *= mult_adj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) offset *= mult_adj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) * So the following can be confusing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) *
	 * To keep things simple, let's assume mult_adj == 1 for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * When mult_adj != 1, remember that the interval and offset values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * have been appropriately scaled so the math is the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, which causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * xtime_interval = cycle_interval * mult
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * So if mult is being incremented by one:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * xtime_interval = (cycle_interval * mult) + cycle_interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * Which can be shortened to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * xtime_interval += cycle_interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * So offset stores the non-accumulated cycles. Thus the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * time (in shifted nanoseconds) is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * now = (offset * adj) + xtime_nsec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * Now, even though we're adjusting the clock frequency, we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * to keep time consistent. In other words, we can't jump back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * in time, and we also want to avoid jumping forward in time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * So given the same offset value, we need the time to be the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * both before and after the freq adjustment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * now = (offset * adj_1) + xtime_nsec_1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * now = (offset * adj_2) + xtime_nsec_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * So:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * (offset * adj_1) + xtime_nsec_1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * (offset * adj_2) + xtime_nsec_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * And we know:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * adj_2 = adj_1 + 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * So:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * (offset * adj_1) + xtime_nsec_1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * (offset * (adj_1+1)) + xtime_nsec_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * (offset * adj_1) + xtime_nsec_1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * (offset * adj_1) + offset + xtime_nsec_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * Canceling the sides:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * xtime_nsec_1 = offset + xtime_nsec_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * Which gives us:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * xtime_nsec -= offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) */
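	/*
	 * Worked example (mult_adj == 1): with offset = 1000 unaccumulated
	 * cycles, mult = 5 and xtime_nsec = 5000 (all in shifted ns), now =
	 * 1000 * 5 + 5000 = 10000. After the adjustment, mult = 6 and
	 * xtime_nsec = 5000 - 1000 = 4000, so now = 1000 * 6 + 4000 =
	 * 10000 still; only cycles accumulated from here on tick at the
	 * new rate.
	 */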
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /* NTP adjustment caused clocksource mult overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) tk->tkr_mono.mult += mult_adj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) tk->xtime_interval += interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) tk->tkr_mono.xtime_nsec -= offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * Adjust the timekeeper's multiplier to the correct frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * and also to reduce the accumulated error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) u32 mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * Determine the multiplier from the current NTP tick length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * Avoid expensive division when the tick length doesn't change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (likely(tk->ntp_tick == ntp_tick_length())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) mult = tk->tkr_mono.mult - tk->ntp_err_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) tk->ntp_tick = ntp_tick_length();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) tk->xtime_remainder, tk->cycle_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * If the clock is behind the NTP time, increase the multiplier by 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * to catch up with it. If it's ahead and there was a remainder in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * tick division, the clock will slow down. Otherwise it will stay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * ahead until the tick length changes to a non-divisible value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) mult += tk->ntp_err_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (unlikely(tk->tkr_mono.clock->maxadj &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) > tk->tkr_mono.clock->maxadj))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) printk_once(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) "Adjusting %s more than 11%% (%ld vs %ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * It may be possible that when we entered this function, xtime_nsec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * was very small. Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * xtime_nsec could cause it to underflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) * Now, since we have already accumulated the second and the NTP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) * subsystem has been notified via second_overflow(), we need to skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) * the next update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) tk->tkr_mono.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) tk->xtime_sec--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) tk->skip_second_overflow = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) /**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 * @tk:		pointer to the timekeeper being updated
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) unsigned int clock_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) while (tk->tkr_mono.xtime_nsec >= nsecps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) int leap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) tk->tkr_mono.xtime_nsec -= nsecps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) tk->xtime_sec++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * Skip NTP update if this second was accumulated before,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * i.e. xtime_nsec underflowed in timekeeping_adjust()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (unlikely(tk->skip_second_overflow)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) tk->skip_second_overflow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
		/* Figure out if it's a leap sec and apply it if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) leap = second_overflow(tk->xtime_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (unlikely(leap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) tk->xtime_sec += leap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) ts.tv_sec = leap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ts.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) tk_set_wall_to_mono(tk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) timespec64_sub(tk->wall_to_monotonic, ts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) clock_set = TK_CLOCK_WAS_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) return clock_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) /**
 * logarithmic_accumulation - shifted accumulation of cycles
 * @tk:		pointer to the timekeeper being updated
 * @offset:	cycles not yet accumulated
 * @shift:	log2 of the number of cycle_intervals to accumulate
 * @clock_set:	updated with TK_CLOCK_WAS_SET when a leap second was applied
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, which allows for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) u32 shift, unsigned int *clock_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) u64 interval = tk->cycle_interval << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) u64 snsec_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) /* If the offset is smaller than a shifted interval, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (offset < interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /* Accumulate one shifted interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) offset -= interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) tk->tkr_mono.cycle_last += interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) tk->tkr_raw.cycle_last += interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) *clock_set |= accumulate_nsecs_to_secs(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* Accumulate raw time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) tk->tkr_raw.xtime_nsec -= snsec_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) tk->raw_sec++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) /* Accumulate error between NTP and clock interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) tk->ntp_error += tk->ntp_tick << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) (tk->ntp_error_shift + shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) * timekeeping_advance - Updates the timekeeper to the current time and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) * current NTP tick length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) static void timekeeping_advance(enum timekeeping_adv_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) struct timekeeper *real_tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) struct timekeeper *tk = &shadow_timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) u64 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) int shift = 0, maxshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) unsigned int clock_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) raw_spin_lock_irqsave(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) /* Make sure we're fully resumed: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (unlikely(timekeeping_suspended))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) offset = real_tk->cycle_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (mode != TK_ADV_TICK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /* Check if there's really nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) /* Do some additional sanity checking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) timekeeping_check_update(tk, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * With NO_HZ we may have to accumulate many cycle_intervals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * (think "ticks") worth of time at once. To do this efficiently,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * we calculate the largest doubling multiple of cycle_intervals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * that is smaller than the offset. We then accumulate that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * chunk in one go, and then try to consume the next smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * doubled multiple.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) shift = ilog2(offset) - ilog2(tk->cycle_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) shift = max(0, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /* Bound shift to one less than what overflows tick_length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) shift = min(shift, maxshift);
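	/*
	 * Worked example: with offset equal to 37 cycle_intervals (and
	 * cycle_interval a power of two), shift starts at ilog2(37) = 5.
	 * The loop below accumulates a 32-interval chunk at shift 5, skips
	 * the 16- and 8-interval sizes, accumulates a 4-interval chunk at
	 * shift 2, skips the 2-interval size and finally accumulates the
	 * last single interval, leaving offset == 0.
	 */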
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) while (offset >= tk->cycle_interval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) offset = logarithmic_accumulation(tk, offset, shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) &clock_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (offset < tk->cycle_interval<<shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) shift--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* Adjust the multiplier to correct NTP error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) timekeeping_adjust(tk, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * Finally, make sure that after the rounding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) * xtime_nsec isn't larger than NSEC_PER_SEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) clock_set |= accumulate_nsecs_to_secs(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) write_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * Update the real timekeeper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * We could avoid this memcpy by switching pointers, but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * requires changes to all other timekeeper usage sites as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * well, i.e. move the timekeeper pointer getter into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * spinlocked/seqcount protected sections. And we trade this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * memcpy under the tk_core.seq against one before we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * updating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) timekeeping_update(tk, clock_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) memcpy(real_tk, tk, sizeof(*tk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) /* The memcpy must come last. Do not put anything here! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) write_seqcount_end(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (clock_set)
		/* Have to call _delayed version, since we're in irq context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) clock_was_set_delayed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) /**
 * update_wall_time - Uses the current clocksource to increment the wall time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) void update_wall_time(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) timekeeping_advance(TK_ADV_TICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) * getboottime64 - Return the real time of system boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * @ts: pointer to the timespec64 to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * Returns the wall-time of boot in a timespec64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * This is based on the wall_to_monotonic offset and the total suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * time. Calls to settimeofday will affect the value returned (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * basically means that however wrong your real time clock is at boot time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) * you get the right time here).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) void getboottime64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) *ts = ktime_to_timespec64(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) EXPORT_SYMBOL_GPL(getboottime64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) void ktime_get_coarse_real_ts64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) *ts = tk_xtime(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) void ktime_get_coarse_ts64(struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) struct timespec64 now, mono;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) now = tk_xtime(tk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) mono = tk->wall_to_monotonic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
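	/*
	 * Worked example: if now.tv_nsec is 900000000 and mono.tv_nsec is
	 * 300000000, the raw sum is 1200000000 ns, so
	 * set_normalized_timespec64() carries the extra second into
	 * tv_sec and leaves tv_nsec at 200000000.
	 */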
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) now.tv_nsec + mono.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) EXPORT_SYMBOL(ktime_get_coarse_ts64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * Must hold jiffies_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) void do_timer(unsigned long ticks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) jiffies_64 += ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) calc_global_load();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * ktime_get_update_offsets_now - hrtimer helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * @cwsseq: pointer to check and store the clock was set sequence number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * @offs_real: pointer to storage for monotonic -> realtime offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) * @offs_boot: pointer to storage for monotonic -> boottime offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) * @offs_tai: pointer to storage for monotonic -> clock tai offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq differs from
 * timekeeper.clock_was_set_seq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) * Called from hrtimer_interrupt() or retrigger_next_event()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) ktime_t *offs_boot, ktime_t *offs_tai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) struct timekeeper *tk = &tk_core.timekeeper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) ktime_t base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) u64 nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) seq = read_seqcount_begin(&tk_core.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) base = tk->tkr_mono.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) nsecs = timekeeping_get_ns(&tk->tkr_mono);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) base = ktime_add_ns(base, nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (*cwsseq != tk->clock_was_set_seq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) *cwsseq = tk->clock_was_set_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) *offs_real = tk->offs_real;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) *offs_boot = tk->offs_boot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) *offs_tai = tk->offs_tai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) /* Handle leapsecond insertion adjustments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (unlikely(base >= tk->next_leap_ktime))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) } while (read_seqcount_retry(&tk_core.seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
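
/*
 * Illustrative sketch, not part of this file: how an hrtimer-style
 * caller might consume the helper. The variable names are hypothetical;
 * the cached sequence number makes the offset copies conditional.
 *
 *	static unsigned int cws_seq;
 *	static ktime_t off_real, off_boot, off_tai;
 *
 *	ktime_t now = ktime_get_update_offsets_now(&cws_seq, &off_real,
 *						   &off_boot, &off_tai);
 *	// A CLOCK_REALTIME expiry then compares against now + off_real.
 */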

/**
 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
 * @txc: the adjtimex request to validate
 *
 * Return: 0 on success, -EINVAL or -EPERM otherwise.
 */
static int timekeeping_validate_timex(const struct __kernel_timex *txc)
{
	if (txc->modes & ADJ_ADJTIME) {
		/* singleshot must not be used with any other mode bits */
		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
			return -EINVAL;
		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
		    !capable(CAP_SYS_TIME))
			return -EPERM;
	} else {
		/* In order to modify anything, you gotta be super-user! */
		if (txc->modes && !capable(CAP_SYS_TIME))
			return -EPERM;
		/*
		 * if the quartz is off by more than 10% then
		 * something is VERY wrong!
		 */
		if (txc->modes & ADJ_TICK &&
		    (txc->tick < 900000/USER_HZ ||
		     txc->tick > 1100000/USER_HZ))
			return -EINVAL;
	}

	if (txc->modes & ADJ_SETOFFSET) {
		/* In order to inject time, you gotta be super-user! */
		if (!capable(CAP_SYS_TIME))
			return -EPERM;

		/*
		 * Check that a timespec/timeval used to inject a time
		 * offset is valid. Offsets can be positive or negative, so
		 * we don't check tv_sec. The value of the timeval/timespec
		 * is the sum of its fields, but *NOTE*:
		 * The field tv_usec/tv_nsec must always be non-negative and
		 * we can't have more nanoseconds/microseconds than a second.
		 */
		if (txc->time.tv_usec < 0)
			return -EINVAL;

		if (txc->modes & ADJ_NANO) {
			if (txc->time.tv_usec >= NSEC_PER_SEC)
				return -EINVAL;
		} else {
			if (txc->time.tv_usec >= USEC_PER_SEC)
				return -EINVAL;
		}
	}

	/*
	 * Check for potential multiplication overflows that can
	 * only happen on 64-bit systems:
	 */
	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
		if (LLONG_MIN / PPM_SCALE > txc->freq)
			return -EINVAL;
		if (LLONG_MAX / PPM_SCALE < txc->freq)
			return -EINVAL;
	}

	return 0;
}
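
/*
 * Illustrative sketch, not part of this file: requests as the validator
 * above sees them, assuming the caller holds CAP_SYS_TIME.
 *
 *	struct __kernel_timex txc = { .modes = ADJ_FREQUENCY, .freq = 1000 };
 *	// timekeeping_validate_timex(&txc) == 0
 *
 *	txc.modes = ADJ_TICK;
 *	txc.tick = 2000000 / USER_HZ;	// far outside the +/-10% window
 *	// timekeeping_validate_timex(&txc) == -EINVAL
 */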


/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 * @txc: the adjtimex request; results are written back into it
 *
 * Return: a negative error code on failure, otherwise the clock state
 * returned by __do_adjtimex().
 */
int do_adjtimex(struct __kernel_timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct audit_ntp_data ad;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = timekeeping_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec64 delta;

		delta.tv_sec = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;

		audit_tk_injoffset(delta);
	}

	audit_ntp_init(&ad);

	ktime_get_real_ts64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai, &ad);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	audit_ntp_log(&ad);

	/* Update the multiplier immediately if frequency was set directly */
	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
		timekeeping_advance(TK_ADV_FREQ);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}
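
/*
 * Illustrative sketch, userspace view, not part of this file: the
 * request shape that exercises the ADJ_SETOFFSET path above. tv_sec and
 * tv_usec sum to the signed step; here tv_usec carries nanoseconds
 * because ADJ_NANO is set.
 *
 *	struct timex txc = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *		.time  = { .tv_sec = -1, .tv_usec = 500000000 },  // -0.5s
 *	};
 *	adjtimex(&txc);		// returns the clock state, e.g. TIME_OK
 */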

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 * @phase_ts: timestamp of the PPS signal phase
 * @raw_ts: raw monotonic timestamp of the same PPS event
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif /* CONFIG_NTP_PPS */
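
/*
 * Illustrative sketch, not part of this file: a PPS consumer feeding a
 * pulse-per-second event into NTP. The two timestamps are assumed to be
 * captured as close to the pulse edge as possible.
 *
 *	struct timespec64 ts_real, ts_raw;
 *
 *	ktime_get_real_ts64(&ts_real);
 *	ktime_get_raw_ts64(&ts_raw);
 *	hardpps(&ts_real, &ts_raw);
 */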

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks: number of ticks that have elapsed since the last call
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	do_timer(ticks);
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}
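
/*
 * Illustrative sketch, not part of this file: a hypothetical periodic
 * tick handler forwarding one elapsed tick per interrupt; interrupts
 * are off in hardirq context, satisfying the constraint above.
 *
 *	static irqreturn_t timer_interrupt(int irq, void *dev_id)
 *	{
 *		xtime_update(1);
 *		return IRQ_HANDLED;
 *	}
 */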