// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <vdso/datapage.h>
#include <vdso/helpers.h>

#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif
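
/*
 * Worked example with made-up numbers (no real clocksource implied):
 * a 10 MHz counter ticks every 100 ns, so with shift = 8 the kernel
 * would program mult = 100 << 8 = 25600. A delta of 3 cycles then
 * yields 3 * 25600 = 76800 here, and 76800 >> 8 = 300 ns once the
 * shift is applied via vdso_shift_ns() in do_hres().
 */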

#ifndef vdso_shift_ns
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
{
	return ns >> shift;
}
#endif

#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif

#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif

#ifndef vdso_cycles_ok
static inline bool vdso_cycles_ok(u64 cycles)
{
	return true;
}
#endif
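
/*
 * Each #ifndef block above is an override hook: an architecture's
 * asm/vdso/gettimeofday.h can provide its own version and define the
 * corresponding macro to suppress the generic one. x86, for instance,
 * overrides vdso_calc_delta() and vdso_cycles_ok() to cope with the
 * TSC (see the "excludes x86/TSC" remark above).
 */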

#ifdef CONFIG_TIME_NS
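/*
 * do_hres_timens() - high resolution clock read for tasks inside a time
 * namespace. The counter is read against the real vdso_data (fetched
 * via __arch_get_timens_vdso_data()), then the per-namespace offset
 * stored in the namespace page passed in as @vdns is added.
 */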
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#else
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif

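/*
 * do_hres() - generic high resolution clock read. Loops on the vvar
 * seqcount until a consistent snapshot of cycle_last, mult, shift and
 * the base time has been read, then converts the counter delta to
 * nanoseconds.
 */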
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	/* Allows the high resolution parts to be compiled out */
	if (!__arch_vdso_hres_capable())
		return -1;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Tasks with a
		 * time namespace enabled have a special VVAR page installed
		 * which has vd->seq set to 1 and vd->clock_mode set to
		 * VDSO_CLOCKMODE_TIMENS. For tasks not affected by a time
		 * namespace this does not hurt performance: if vd->seq is
		 * odd, i.e. a concurrent update is in progress, the extra
		 * check for vd->clock_mode is just a few extra instructions
		 * while spin waiting for vd->seq to become even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

#ifdef CONFIG_TIME_NS
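/*
 * do_coarse_timens() - coarse clock read for tasks inside a time
 * namespace: takes the seqcount-protected snapshot from the real
 * vdso_data and adds the namespace offset from @vdns.
 */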
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	s32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	return -1;
}
#endif

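/*
 * do_coarse() - coarse clock read: no hardware counter access, just a
 * consistent copy of the timestamp which was valid at the last tick.
 */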
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}

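/*
 * __cvdso_clock_gettime_common() - dispatch a clockid to the matching
 * reader. Returns 0 on success and -1 when the clock is not handled in
 * the VDSO, in which case the callers fall back to the syscall.
 */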
static __always_inline int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
			     struct __kernel_timespec *ts)
{
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}

static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
			   struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(vd, clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}

static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
}
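
/*
 * This file is not compiled standalone; each architecture's vDSO
 * includes it and exports thin wrappers around the __cvdso_*()
 * helpers. A minimal sketch (the exported symbol name varies per
 * architecture):
 *
 *	int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 *	{
 *		return __cvdso_clock_gettime(clock, ts);
 *	}
 */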

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
			     struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	/* For ret == 0 */
	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}

static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
}
#endif /* BUILD_VDSO32 */

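/*
 * __cvdso_gettimeofday_data() - gettimeofday() on top of the
 * CLOCK_REALTIME highres reader; the nanosecond value is scaled down
 * to microseconds and the (legacy) timezone is copied from the vdso
 * data.
 */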
static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
			  struct __kernel_old_timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}

static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
}

#ifdef VDSO_HAS_TIME
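/*
 * __cvdso_time_data() - time() without the seqcount loop: a single
 * READ_ONCE() of the CLOCK_REALTIME seconds value is good enough at
 * second granularity.
 */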
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}

static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	return __cvdso_time_data(__arch_get_vdso_data(), time);
}
#endif /* VDSO_HAS_TIME */

#ifdef VDSO_HAS_CLOCK_GETRES
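/*
 * __cvdso_clock_getres_common() - clock_getres() from the vdso data:
 * highres and raw clocks report the hrtimer resolution, coarse clocks
 * report LOW_RES_NSEC (one tick); anything else is left to the syscall
 * fallback in the callers.
 */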
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
				struct __kernel_timespec *res)
{
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return 0;
}

static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
			      struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(vd, clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}

static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
}

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
				 struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}
	return ret;
}

static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
						clock, res);
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */