^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * x86 TSC related functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #ifndef _ASM_X86_TSC_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #define _ASM_X86_TSC_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/cpufeature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

/* CPU and TSC frequencies in kHz — presumably filled in during TSC
 * calibration; defined in the TSC implementation, not here. */
extern unsigned int cpu_khz;
extern unsigned int tsc_khz;

/* Disable TSC access; exact semantics live at the definition site
 * (NOTE(review): likely clears CR4.TSD-style userspace access — confirm). */
extern void disable_TSC(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static inline cycles_t get_cycles(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #ifndef CONFIG_X86_TSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) if (!boot_cpu_has(X86_FEATURE_TSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) return rdtsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
/*
 * Translate an Always Running Timer (ART) reading — raw counter value,
 * or nanoseconds for the _ns variant — into a TSC-based
 * struct system_counterval_t.
 */
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);

/* Early and normal TSC initialization entry points. */
extern void tsc_early_init(void);
extern void tsc_init(void);
/* Returns a pre-known delay-loop calibration value, if one is available
 * (presumably skips lpj calibration — confirm at the definition). */
extern unsigned long calibrate_delay_is_known(void);
/* Mark the TSC unusable for timekeeping; @reason is a human-readable string. */
extern void mark_tsc_unstable(char *reason);
/* Non-zero when TSCs cannot be assumed synchronized across CPUs. */
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
/* Record that TSCs may reset asynchronously; see tsc_async_resets below. */
extern void mark_tsc_async_resets(char *reason);
/* Frequency calibration routines — return value presumably in kHz,
 * matching cpu_khz/tsc_khz above. */
extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
/* Convert a raw TSC value into sched_clock() time (NOTE(review):
 * likely nanoseconds — confirm against the implementation). */
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
/* Non-zero when the TSC is trusted as a reliable clocksource. */
extern int tsc_clocksource_reliable;
/*
 * tsc_async_resets: set when TSCs may be reset asynchronously (see
 * mark_tsc_async_resets()). Without CONFIG_X86_TSC it is a
 * compile-time 'false', so callers can test it with no #ifdef of
 * their own and the dependent code folds away.
 */
#ifdef CONFIG_X86_TSC
extern bool tsc_async_resets;
#else
# define tsc_async_resets false
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
#ifdef CONFIG_X86_TSC
extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
#else
/*
 * Without CONFIG_X86_TSC the sync machinery compiles away to no-op
 * stubs: nothing to store/verify, and "TSC_ADJUST available" is
 * reported as false.
 */
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
static inline void tsc_verify_tsc_adjust(bool resume) { }
static inline void check_tsc_sync_source(int cpu) { }
static inline void check_tsc_sync_target(void) { }
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
/* Handler for the "notsc" kernel command-line parameter (presumably
 * registered via __setup() — confirm in tsc.c). */
extern int notsc_setup(char *);
/* Save/restore sched_clock state, e.g. across suspend/resume
 * (NOTE(review): call sites not visible here — confirm). */
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);

/* Derive the CPU frequency in kHz from MSRs instead of timed calibration. */
unsigned long cpu_khz_from_msr(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #endif /* _ASM_X86_TSC_H */