^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef _ASM_X86_PVCLOCK_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define _ASM_X86_PVCLOCK_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <asm/clocksource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <asm/pvclock-abi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
/*
 * Helper routines shared by the Xen and KVM paravirtual clock sources.
 * Implementations live in arch/x86/kernel/pvclock.c.
 */

/* Read the current time, in ns, from a pvclock time-info page. */
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);

/* Read the flags byte from @src, merged with any globally-forced flags. */
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);

/* Force the given flag bits to be treated as set on every read. */
void pvclock_set_flags(u8 flags);

/* Derive the TSC frequency (kHz) from the scaling factors in @src. */
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);

/* Convert a hypervisor-provided wall-clock snapshot into a timespec64. */
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
			    struct pvclock_vcpu_time_info *vcpu,
			    struct timespec64 *ts);

/* Reset pvclock state after resume from suspend. */
void pvclock_resume(void);

/* Tell watchdogs that time may have jumped (e.g. across host suspend). */
void pvclock_touch_watchdogs(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
/*
 * Begin a seqcount-style read of a pvclock time-info structure.
 *
 * Returns the current version with the low bit masked off; the low bit
 * presumably marks an in-progress hypervisor update (odd = updating), so
 * masking guarantees the subsequent pvclock_read_retry() comparison fails
 * if an update was in flight — TODO confirm against the pvclock ABI.
 *
 * Callers must read the time fields *after* this returns and then call
 * pvclock_read_retry(), looping until it returns false.
 */
static __always_inline
unsigned pvclock_read_begin(const struct pvclock_vcpu_time_info *src)
{
	unsigned version = src->version & ~1;
	/* Make sure that the version is read before the data. */
	virt_rmb();
	return version;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
/*
 * Finish a seqcount-style read started by pvclock_read_begin().
 *
 * Returns true if the version changed while the caller was reading the
 * time fields (i.e. the hypervisor updated the structure concurrently),
 * in which case the caller must discard its snapshot and retry.
 */
static __always_inline
bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
			unsigned version)
{
	/* Make sure that the version is re-read after the data. */
	virt_rmb();
	return unlikely(version != src->version);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * yielding a 64-bit result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) */
/*
 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
 *
 * Computes ((delta << shift) * mul_frac) >> 32, i.e. treats @mul_frac as
 * a 32.32 fixed-point multiplier applied to the pre-shifted delta.  This
 * is the pvclock TSC-to-nanoseconds conversion: @mul_frac and @shift come
 * from the hypervisor's tsc_to_system_mul / tsc_shift fields.
 *
 * The full 96-bit intermediate product is formed in assembly because
 * plain 64-bit C arithmetic would lose the high bits of the multiply.
 */
static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#else
	ulong tmp;
#endif

	/* A negative shift divides; a non-negative shift multiplies. */
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	/*
	 * 32-bit: build the 64x32 product from two 32x32->64 MULs.
	 * First multiply the low half of delta, then the high half,
	 * and combine: result = hi(lo*frac) + lo(hi*frac) carried into
	 * hi(hi*frac), which is exactly (delta * frac) >> 32.
	 */
	__asm__ (
		"mul  %5       ; "
		"mov  %4,%%eax ; "
		"mov  %%edx,%4 ; "
		"mul  %5       ; "
		"xor  %5,%5    ; "
		"add  %4,%%eax ; "
		"adc  %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
	/*
	 * 64-bit: MULQ gives the full 128-bit product in RDX:RAX;
	 * SHRD shifts the middle 64 bits ((delta * frac) >> 32) into RAX.
	 */
	__asm__ (
		"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
		: [lo]"=a"(product),
		  [hi]"=d"(tmp)
		: "0"(delta),
		  [mul_frac]"rm"((u64)mul_frac));
#else
#error implement me!
#endif

	return product;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) u64 delta = tsc - src->tsc_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) src->tsc_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return src->system_time + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
/*
 * Wrapper around the per-vCPU time info, aligned to SMP_CACHE_BYTES —
 * presumably so adjacent entries in a per-CPU array don't share a cache
 * line (avoids false sharing); confirm against the vsyscall mapping code.
 */
struct pvclock_vsyscall_time_info {
	struct pvclock_vcpu_time_info pvti;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
/* Size of one cache-line-aligned vsyscall time-info entry. */
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)

#ifdef CONFIG_PARAVIRT_CLOCK
/* Register / retrieve CPU 0's time-info page for vsyscall/vDSO use. */
void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti);
struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void);
#else
/* No paravirt clock configured: there is never a CPU 0 time-info page. */
static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
{
	return NULL;
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #endif /* _ASM_X86_PVCLOCK_H */