^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/sched/clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/acpi_pmtmr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/clocksource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/timex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/static_key.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/hpet.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/vgtod.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/hypervisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/nmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/x86_init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/geode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <asm/apic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/intel-family.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/i8259.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/uv/uv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) EXPORT_SYMBOL(cpu_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) unsigned int __read_mostly tsc_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) EXPORT_SYMBOL(tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define KHZ 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * TSC can be unstable due to cpufreq or due to unsynced TSCs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) static int __read_mostly tsc_unstable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static unsigned int __initdata tsc_early_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static DEFINE_STATIC_KEY_FALSE(__use_tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) int tsc_clocksource_reliable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static u32 art_to_tsc_numerator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) static u32 art_to_tsc_denominator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) static u64 art_to_tsc_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct clocksource *art_related_clocksource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct cyc2ns {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) seqcount_latch_t seq; /* 32 + 4 = 36 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }; /* fits one cacheline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static int __init tsc_early_khz_setup(char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) return kstrtouint(buf, 0, &tsc_early_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) early_param("tsc_early_khz", tsc_early_khz_setup);
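/*
 * Usage sketch (hypothetical value): booting with "tsc_early_khz=2100000"
 * on the kernel command line makes the early boot code assume a 2.1 GHz
 * TSC instead of calibrating it early, which helps on systems where the
 * early calibration methods are unreliable or unavailable.
 */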
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) __always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) int seq, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) idx = seq & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) } while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) __always_inline void cyc2ns_read_end(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * Accelerators for sched_clock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * convert from cycles (64 bits) => nanoseconds (64 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * basic equation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * ns = cycles / (freq / ns_per_sec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * ns = cycles * (ns_per_sec / freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * ns = cycles * (10^9 / (cpu_khz * 10^3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * ns = cycles * (10^6 / cpu_khz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * Then we use scaling math (suggested by george@mvista.com) to get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * ns = cycles * (10^6 * SC / cpu_khz) / SC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * ns = cycles * cyc2ns_scale / SC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * And since SC is a constant power of two, we can convert the div
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * into a shift. The larger SC is, the more accurate the conversion, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * (64-bit result) can be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * We can use a kHz divisor instead of MHz to keep better precision.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * (mathieu.desnoyers@polymtl.ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * -johnstul@us.ibm.com "math is hard, lets go shopping!"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct cyc2ns_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) unsigned long long ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) cyc2ns_read_begin(&data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) ns = data.cyc2ns_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) cyc2ns_read_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) return ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
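/*
 * Rough worked example of the scaling above, assuming a hypothetical
 * 2 GHz TSC (khz = 2000000): __set_cyc2ns_scale() below asks
 * clocks_calc_mult_shift() for a (mul, shift) pair such that
 *
 *	ns ~= cycles * cyc2ns_mul >> cyc2ns_shift
 *	   ~= cycles * ((10^6 << shift) / khz) >> shift
 *
 * With a shift of 31 that gives cyc2ns_mul ~= 10^6 * 2^31 / 2000000 = 2^30,
 * so 2000 cycles convert to 2000 * 2^30 >> 31 = 1000 ns, i.e. 0.5 ns per
 * cycle as expected for 2 GHz. mul_u64_u32_shr() performs the 64x32->96 bit
 * multiply and shift without losing the high bits of the product.
 */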
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) unsigned long long ns_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct cyc2ns_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct cyc2ns *c2n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) ns_now = cycles_2_ns(tsc_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * Compute a new multiplier as per the above comment and ensure our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * time function is continuous; see the comment near struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * cyc2ns_data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) NSEC_PER_MSEC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * cyc2ns_shift is exported via arch_perf_update_userpage(), where it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * not expected to be greater than 31 because the originally published
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * conversion algorithm shifted a 32-bit value (it now specifies a 64-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * value) - see the perf_event_mmap_page documentation in perf_event.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) if (data.cyc2ns_shift == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) data.cyc2ns_shift = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) data.cyc2ns_mul >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) data.cyc2ns_offset = ns_now -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) c2n = per_cpu_ptr(&cyc2ns, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) raw_write_seqcount_latch(&c2n->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) c2n->data[0] = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) raw_write_seqcount_latch(&c2n->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) c2n->data[1] = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) }
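/*
 * Sketch of why the latch above is safe, assuming the generic
 * seqcount_latch semantics: each raw_write_seqcount_latch() increments the
 * sequence count, and readers pick the slot from the count they sampled
 * (idx = seq & 1 in cyc2ns_read_begin()):
 *
 *	writer				reader
 *	seq++ (now odd)			seq = READ_ONCE(seq); idx = seq & 1;
 *	data[0] = new			copy data[idx];
 *	seq++ (now even)		re-read seq; retry if it changed
 *	data[1] = new
 *
 * A reader that races with the writer either copies the slot the writer is
 * not currently touching, or sees the sequence count change and retries;
 * it never blocks, which is what makes this usable from sched_clock().
 */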
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) sched_clock_idle_sleep_event();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) if (khz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) __set_cyc2ns_scale(khz, cpu, tsc_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) sched_clock_idle_wakeup_event();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * Initialize cyc2ns for boot cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) static void __init cyc2ns_init_boot_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) seqcount_latch_init(&c2n->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * Secondary CPUs do not run through tsc_init(), so set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) * all the scale factors for all CPUs, assuming the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) * speed as the bootup CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static void __init cyc2ns_init_secondary_cpus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) unsigned int cpu, this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) struct cyc2ns_data *data = c2n->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) if (cpu != this_cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) seqcount_latch_init(&c2n->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) c2n = per_cpu_ptr(&cyc2ns, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) c2n->data[0] = data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) c2n->data[1] = data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * Scheduler clock - returns current time in nanosec units.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) u64 native_sched_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (static_branch_likely(&__use_tsc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) u64 tsc_now = rdtsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) /* return the value in ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) return cycles_2_ns(tsc_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * Fall back to jiffies if there's no TSC available:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * ( But note that we still use it if the TSC is marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * unstable. We do this because unlike Time Of Day,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * the scheduler clock tolerates small errors and it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * very important for it to be as fast as the platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * can achieve. )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /* No locking but a rare wrong value is not a big deal: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
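/*
 * For scale: with a hypothetical HZ=1000 the jiffies fallback above
 * advances in (1000000000 / HZ) = 1000000 ns steps, i.e. one millisecond
 * per jiffy - far coarser than the TSC path, but tolerable for the
 * scheduler clock.
 */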
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * Generate a sched_clock if you already have a TSC value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) u64 native_sched_clock_from_tsc(u64 tsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) return cycles_2_ns(tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) /* We need to define a real function for sched_clock to override the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) weak default version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) #ifdef CONFIG_PARAVIRT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) unsigned long long sched_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return paravirt_sched_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) bool using_native_sched_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) return pv_ops.time.sched_clock == native_sched_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) unsigned long long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) sched_clock(void) __attribute__((alias("native_sched_clock")));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) bool using_native_sched_clock(void) { return true; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) int check_tsc_unstable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) return tsc_unstable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) EXPORT_SYMBOL_GPL(check_tsc_unstable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) #ifdef CONFIG_X86_TSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) int __init notsc_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) mark_tsc_unstable("boot parameter notsc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * Disable flag for the TSC. Takes effect by clearing the TSC CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * feature flag in cpu/common.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) int __init notsc_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) setup_clear_cpu_cap(X86_FEATURE_TSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) __setup("notsc", notsc_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) static int no_sched_irq_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static int no_tsc_watchdog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) static int __init tsc_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (!strcmp(str, "reliable"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) tsc_clocksource_reliable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (!strncmp(str, "noirqtime", 9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) no_sched_irq_time = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) if (!strcmp(str, "unstable"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) mark_tsc_unstable("boot parameter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) if (!strcmp(str, "nowatchdog"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) no_tsc_watchdog = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) __setup("tsc=", tsc_setup);
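/*
 * Roughly, the options parsed above mean (kernel command line examples):
 *
 *	tsc=reliable	treat the TSC clocksource as reliable, skipping the
 *			boot-time and run-time stability checks
 *	tsc=noirqtime	do not use the TSC for IRQ time accounting
 *	tsc=unstable	mark the TSC unstable right away at boot
 *	tsc=nowatchdog	do not have the clocksource watchdog verify the TSC
 *
 * "notsc" (handled above) additionally distrusts or disables the TSC,
 * depending on CONFIG_X86_TSC.
 */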
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) #define MAX_RETRIES 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) #define TSC_DEFAULT_THRESHOLD 0x20000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * Read TSC and the reference counters. Take care of any disturbances
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) static u64 tsc_read_refs(u64 *p, int hpet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) u64 t1, t2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) for (i = 0; i < MAX_RETRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) t1 = get_cycles();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) if (hpet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) *p = acpi_pm_read_early();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) t2 = get_cycles();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) if ((t2 - t1) < thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) return t2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) return ULLONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
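/*
 * The threshold above bounds how long the reference read itself may take:
 * tsc_khz >> 5 cycles is roughly 1/32 of a millisecond (~31 us) of TSC
 * time, and the 0x20000 (131072 cycles) default covers the case where
 * tsc_khz is not known yet. If an SMI or similar disturbance lands between
 * the two get_cycles() calls, the read is retried up to MAX_RETRIES times
 * and ULLONG_MAX tells the caller that every attempt was disturbed.
 */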
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * Calculate the TSC frequency from HPET reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) u64 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) if (hpet2 < hpet1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) hpet2 += 0x100000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) hpet2 -= hpet1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) do_div(tmp, 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) deltatsc = div64_u64(deltatsc, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) return (unsigned long) deltatsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
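/*
 * Unit check for the math above: deltatsc is passed in as
 * (tsc2 - tsc1) * 1000000 (see pit_hpet_ptimer_calibrate_cpu() below) and
 * the value read from HPET_PERIOD is the counter period in femtoseconds, so
 *
 *	tmp = hpet_ticks * fs_per_tick / 10^6		-> elapsed time in ns
 *	deltatsc / tmp = tsc_delta * 10^6 / ns		-> cycles per ms = kHz
 *
 * e.g. a hypothetical 14.318 MHz HPET has a period of ~69841279 fs, so
 * 1000 HPET ticks correspond to ~69841 ns of elapsed time.
 */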
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * Calculate the TSC frequency from PMTimer reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) u64 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) if (!pm1 && !pm2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) return ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) if (pm2 < pm1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) pm2 += (u64)ACPI_PM_OVRRUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) pm2 -= pm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) tmp = pm2 * 1000000000LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) do_div(tmp, PMTMR_TICKS_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) do_div(deltatsc, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return (unsigned long) deltatsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
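/*
 * Same unit check for the ACPI PM timer path, assuming the timer runs at
 * PMTMR_TICKS_PER_SEC (3579545 Hz) and wraps at 24 bits (ACPI_PM_OVRRUN):
 *
 *	tmp = pm_ticks * 10^9 / PMTMR_TICKS_PER_SEC	-> elapsed time in ns
 *	deltatsc / tmp					-> TSC kHz
 *
 * e.g. 178977 hypothetical PM timer ticks are ~50 ms, i.e. ~50000000 ns.
 */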
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) #define CAL_MS 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) #define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) #define CAL_PIT_LOOPS 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) #define CAL2_MS 50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) #define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) #define CAL2_PIT_LOOPS 5000
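/*
 * With PIT_TICK_RATE = 1193182 Hz the latches above work out to:
 *
 *	CAL_LATCH  = 1193182 / (1000 / 10) = 11931	(10 ms countdown)
 *	CAL2_LATCH = 1193182 / (1000 / 50) = 59659	(50 ms countdown)
 *
 * i.e. the first, fast pass programs a 10 ms PIT countdown and the retry
 * pass a 50 ms one, with a correspondingly larger minimum loop count.
 */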
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * Try to calibrate the TSC against the Programmable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * Interrupt Timer and return the frequency of the TSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * in kHz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * Return ULONG_MAX on failure to calibrate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) u64 tsc, t1, t2, delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) unsigned long tscmin, tscmax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) int pitcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (!has_legacy_pic()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * Relies on tsc_early_delay_calibrate() to have given us a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * semi-usable udelay(); wait for the same 50ms we would have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * spent in the PIT loop below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) udelay(10 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) udelay(10 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) udelay(10 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) udelay(10 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) udelay(10 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) return ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) /* Set the Gate high, disable speaker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) outb((inb(0x61) & ~0x02) | 0x01, 0x61);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * Set up CTC channel 2 for mode 0 (interrupt on terminal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) * count mode), binary count. Write the latch value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * (LSB then MSB) to begin the countdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) outb(0xb0, 0x43);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) outb(latch & 0xff, 0x42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) outb(latch >> 8, 0x42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) tsc = t1 = t2 = get_cycles();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) pitcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) tscmax = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) tscmin = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) while ((inb(0x61) & 0x20) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) t2 = get_cycles();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) delta = t2 - tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) tsc = t2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if ((unsigned long) delta < tscmin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) tscmin = (unsigned int) delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if ((unsigned long) delta > tscmax)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) tscmax = (unsigned int) delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) pitcnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * Sanity checks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * If we were not able to read the PIT at least loopmin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * times, then we have been hit by a massive SMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * If the maximum is 10 times larger than the minimum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * then we got hit by an SMI as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) if (pitcnt < loopmin || tscmax > 10 * tscmin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) return ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /* Calculate the PIT value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) delta = t2 - t1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) do_div(delta, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) return delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
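/*
 * The final division above is the whole result: delta is the number of TSC
 * cycles that elapsed while the PIT counted down 'latch' ticks, i.e. 'ms'
 * milliseconds, so delta / ms is cycles per millisecond, which is the TSC
 * frequency in kHz. For a hypothetical 2.4 GHz TSC and the default 10 ms
 * window, delta would be about 24000000 and the function would return
 * roughly 2400000.
 */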
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * This reads the current MSB of the PIT counter, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * checks if we are running on sufficiently fast and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * non-virtualized hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * Our expectations are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * - the PIT is running at roughly 1.19MHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * - each IO is going to take about 1us on real hardware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * but we allow it to be much faster (by a factor of 10) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * _slightly_ slower (i.e. we allow up to a 2us read+counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * update) - anything else implies an unacceptably slow CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * or PIT for the fast calibration to work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * - with 256 PIT ticks to read the value, we have 214us to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * see the same MSB (and overhead like doing a single TSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * read per MSB value etc).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * - We're doing 2 reads per loop (LSB, MSB), and we expect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * them each to take about a microsecond on real hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * So we expect a count value of around 100. But we'll be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * generous, and accept anything over 50.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * - if the PIT is stuck, and we see *many* more reads, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * return early (and the next caller of pit_expect_msb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * will then consider it a failure when they don't see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * next expected value).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * These expectations mean that we know that we have seen the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * transition from one expected value to another with a fairly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * high accuracy, and we didn't miss any events. We can thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * use the TSC value at the transitions to calculate a pretty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * good value for the TSC frequency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) static inline int pit_verify_msb(unsigned char val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) /* Ignore LSB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) inb(0x42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) return inb(0x42) == val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) u64 tsc = 0, prev_tsc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) for (count = 0; count < 50000; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (!pit_verify_msb(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) prev_tsc = tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) tsc = get_cycles();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) *deltap = get_cycles() - prev_tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) *tscp = tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * We require _some_ success, but the quality control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * will be based on the error terms on the TSC values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return count > 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * How many MSB values do we want to see? We aim for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * a maximum error rate of 500ppm (in practice the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * real error is much smaller), but refuse to spend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * more than 50ms on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) #define MAX_QUICK_PIT_MS 50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
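/*
 * Plugging in PIT_TICK_RATE = 1193182 Hz, MAX_QUICK_PIT_ITERATIONS works
 * out to 50 * 1193182 / 1000 / 256 = 233, i.e. at most 233 observable MSB
 * decrements (256 PIT ticks each) fit into the 50 ms budget.
 */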
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static unsigned long quick_pit_calibrate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) u64 tsc, delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) unsigned long d1, d2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (!has_legacy_pic())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) /* Set the Gate high, disable speaker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) outb((inb(0x61) & ~0x02) | 0x01, 0x61);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * Counter 2, mode 0 (one-shot), binary count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * NOTE! Mode 2 decrements by two (and then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * output is flipped each time, giving the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * final output frequency as a decrement-by-one),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * so mode 0 is much better when looking at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * individual counts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) outb(0xb0, 0x43);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) /* Start at 0xffff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) outb(0xff, 0x42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) outb(0xff, 0x42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * The PIT starts counting at the next edge, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) * need to delay for a microsecond. The easiest way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) * to do that is to just read back the 16-bit counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) * once from the PIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) pit_verify_msb(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (pit_expect_msb(0xff, &tsc, &d1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (!pit_expect_msb(0xff-i, &delta, &d2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) delta -= tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * Extrapolate the error and fail fast if the error will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * never be below 500 ppm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (i == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * Iterate until the error is less than 500 ppm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (d1+d2 >= delta >> 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) * Check the PIT one more time to verify that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) * all TSC reads were stable wrt the PIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * This also guarantees serialization of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * last cycle read ('d2') in pit_expect_msb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (!pit_verify_msb(0xfe - i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) pr_info("Fast TSC calibration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * Ok, if we get here, then we've seen the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * MSB of the PIT decrement 'i' times, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * error has shrunk to less than 500 ppm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * As a result, we can depend on there not being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * any odd delays anywhere, and the TSC reads are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) * reliable (within the error).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) * kHz = ticks / time-in-seconds / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) delta *= PIT_TICK_RATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) do_div(delta, i*256*1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) pr_info("Fast TSC calibration using PIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
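/*
 * Worked example of the result above, with hypothetical numbers: if the
 * MSB was seen to decrement i = 200 times, the PIT counted 200 * 256 =
 * 51200 ticks, i.e. 51200 / 1193182 ~= 42.9 ms. If the TSC advanced by
 * delta = 103000000 cycles over that window, the returned value is
 *
 *	103000000 * 1193182 / (200 * 256 * 1000) ~= 2400346 kHz  (~2.4 GHz)
 *
 * The ">> 11" checks in the loop require d1 + d2 < delta / 2048 before
 * success is declared, i.e. a combined read uncertainty slightly better
 * than 500 ppm (1/2048 ~= 488 ppm).
 */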
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * native_calibrate_tsc - determine the TSC frequency via CPUID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) * Return: the TSC frequency in kHz, or 0 if it could not be determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) unsigned long native_calibrate_tsc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) unsigned int crystal_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (boot_cpu_data.cpuid_level < 0x15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) eax_denominator = ebx_numerator = ecx_hz = edx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (ebx_numerator == 0 || eax_denominator == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) crystal_khz = ecx_hz / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * Denverton SoCs don't report crystal clock, and also don't support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (crystal_khz == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) crystal_khz = 25000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * TSC frequency reported directly by CPUID is a "hardware reported"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * frequency and is the most accurate one we have so far. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * is considered a known frequency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (crystal_khz != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * Some Intel SoCs like Skylake and Kabylake don't report the crystal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * clock, but we can easily calculate it to a high degree of accuracy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * by considering the crystal ratio and the CPU speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) unsigned int eax_base_mhz, ebx, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) crystal_khz = eax_base_mhz * 1000 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) eax_denominator / ebx_numerator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (crystal_khz == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * For Atom SoCs the TSC is the only reliable clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * Mark the TSC reliable so that no clocksource watchdog runs on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) #ifdef CONFIG_X86_LOCAL_APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * The local APIC appears to be fed by the core crystal clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * (which sounds entirely sensible). We can set the global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * lapic_timer_period here to avoid having to calibrate the APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * timer later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) lapic_timer_period = crystal_khz * 1000 / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) return crystal_khz * ebx_numerator / eax_denominator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
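/*
 * Hypothetical example of the CPUID 15H/16H path above: with
 * eax_denominator = 2, ebx_numerator = 188 and ecx_hz = 24000000 the
 * crystal is 24000 kHz and the function returns
 *
 *	24000 * 188 / 2 = 2256000 kHz	(a 2.256 GHz TSC)
 *
 * If ECX were zero instead, the same 24000 kHz would be recovered from a
 * 2256 MHz CPUID 16H base frequency: 2256 * 1000 * 2 / 188 = 24000.
 */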
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) static unsigned long cpu_khz_from_cpuid(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (boot_cpu_data.cpuid_level < 0x16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) return eax_base_mhz * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
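/*
 * CPUID 16H reports frequencies in MHz, so e.g. a hypothetical
 * eax_base_mhz = 2400 makes this function return 2400 * 1000 = 2400000 kHz.
 * Note that this is the processor's advertised base frequency, not a
 * measured value.
 */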
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * Calibrate the CPU using the PIT, HPET and ACPI PM timer methods. These
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * become available later in boot, after ACPI has been initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) u64 tsc1, tsc2, delta, ref1, ref2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) unsigned long flags, latch, ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) int hpet = is_hpet_enabled(), i, loopmin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * Run 5 calibration loops to get the lowest frequency value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * (the best estimate). We use two different calibration modes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * load a timeout of 50ms. We read the time right after we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * started the timer and wait until the PIT count down reaches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * zero. In each wait loop iteration we read the TSC and check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * the delta to the previous read. We keep track of the min
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * and max values of that delta. The delta is mostly defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * by the IO time of the PIT access, so we can detect when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * any disturbance happened between the two reads. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * maximum time is significantly larger than the minimum time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * then we discard the result and have another try.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * 2) Reference counter. If available we use the HPET or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * PMTIMER as a reference to check the sanity of that value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * We use separate TSC readouts and check inside of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * reference read for any possible disturbance. We discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * disturbed values here as well. We do that around the PIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * calibration delay loop as we have to wait for a certain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * amount of time anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) */
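	/*
	 * On the acceptance check further below: tsc_pit_min * 100 /
	 * tsc_ref_min is the PIT result expressed as a percentage of the
	 * reference result, so "delta >= 90 && delta <= 110" accepts the
	 * pair only when the two independent calibrations agree to within
	 * roughly 10%.
	 */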
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /* Preset PIT loop values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) latch = CAL_LATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ms = CAL_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) loopmin = CAL_PIT_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) unsigned long tsc_pit_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * Read the start value and the reference count of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * hpet/pmtimer when available. Then do the PIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * calibration, which will take at least 10 ms (50 ms on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * the retry pass), and read the end value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) tsc1 = tsc_read_refs(&ref1, hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) tsc2 = tsc_read_refs(&ref2, hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* Pick the lowest PIT TSC calibration so far */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /* hpet or pmtimer available ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (ref1 == ref2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) /* Check whether the sampling was disturbed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) tsc2 = (tsc2 - tsc1) * 1000000LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (hpet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* Check the reference deviation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) delta = ((u64) tsc_pit_min) * 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) do_div(delta, tsc_ref_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * If both calibration results are inside a 10% window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * then we can be sure that the calibration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * succeeded. We break out of the loop right away. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * use the reference value, as it is more precise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (delta >= 90 && delta <= 110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) pr_info("PIT calibration matches %s. %d loops\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) hpet ? "HPET" : "PMTIMER", i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return tsc_ref_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * Check whether PIT failed more than once. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * happens in virtualized environments. We need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * give the virtual PC a slightly longer timeframe for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * the HPET/PMTIMER to make the result precise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (i == 1 && tsc_pit_min == ULONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) latch = CAL2_LATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ms = CAL2_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) loopmin = CAL2_PIT_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * Now check the results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (tsc_pit_min == ULONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* PIT gave no useful value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) pr_warn("Unable to calibrate against PIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* We don't have an alternative source, disable TSC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (!hpet && !ref1 && !ref2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) pr_notice("No reference (HPET/PMTIMER) available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* The alternative source failed as well, disable TSC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (tsc_ref_min == ULONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) pr_warn("HPET/PMTIMER calibration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* Use the alternative source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pr_info("using %s reference calibration\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) hpet ? "HPET" : "PMTIMER");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return tsc_ref_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /* We don't have an alternative source, use the PIT calibration value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!hpet && !ref1 && !ref2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) pr_info("Using PIT calibration value\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return tsc_pit_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* The alternative source failed, use the PIT calibration value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (tsc_ref_min == ULONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return tsc_pit_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	 * The calibration values differ too much. When in doubt, we use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	 * the PIT value as we know that there are PMTIMERs around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	 * running at double speed. At least we let the user know:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pr_warn("PIT calibration deviates from %s: %lu %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) pr_info("Using PIT calibration value\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return tsc_pit_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)  * native_calibrate_cpu_early - calibrate the CPU using methods available early in boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) unsigned long native_calibrate_cpu_early(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (!fast_calibrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) fast_calibrate = cpu_khz_from_msr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (!fast_calibrate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) fast_calibrate = quick_pit_calibrate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return fast_calibrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * native_calibrate_cpu - calibrate the cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static unsigned long native_calibrate_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) unsigned long tsc_freq = native_calibrate_cpu_early();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (!tsc_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) tsc_freq = pit_hpet_ptimer_calibrate_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return tsc_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) void recalibrate_cpu_khz(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) #ifndef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) unsigned long cpu_khz_old = cpu_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (!boot_cpu_has(X86_FEATURE_TSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) cpu_khz = x86_platform.calibrate_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) tsc_khz = x86_platform.calibrate_tsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (tsc_khz == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) tsc_khz = cpu_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) cpu_khz = tsc_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) cpu_khz_old, cpu_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) EXPORT_SYMBOL(recalibrate_cpu_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) static unsigned long long cyc2ns_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) void tsc_save_sched_clock_state(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (!sched_clock_stable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) cyc2ns_suspend = sched_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)  * Even on processors with invariant TSC, the TSC gets reset in some of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)  * ACPI system sleep states. And on some systems the BIOS seems to reinit the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)  * TSC to an arbitrary value (still sync'd across CPUs) during resume from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)  * such sleep states. To cope with this, recompute the cyc2ns_offset for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)  * cpu so that sched_clock() continues from the point where it was left off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)  * during suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) */
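/*
 * A sketch of the arithmetic below (hypothetical numbers): if sched_clock()
 * read 100s right before suspend (cyc2ns_suspend = 100s) and the reset TSC
 * converts to only 2s after resume, the offset computed below is 98s. Storing
 * that offset on every CPU makes subsequent sched_clock() reads continue
 * from ~100s instead of jumping backwards.
 */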
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) void tsc_restore_sched_clock_state(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) unsigned long long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (!sched_clock_stable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * We're coming out of suspend, there's no concurrency yet; don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * bother being nice about the RCU stuff, just write to both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * data fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) offset = cyc2ns_suspend - sched_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) #ifdef CONFIG_CPU_FREQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * as unstable and give up in those cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * Should fix up last_tsc too. Currently gettimeofday in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * first tick after the change will be slightly wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) */
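/*
 * Illustrative example (hypothetical frequencies), assuming cpufreq_scale()
 * performs a proportional old * new / ref scaling: with ref_freq = 800000 kHz,
 * tsc_khz_ref = 800000 and a transition to freq->new = 1600000 kHz, tsc_khz
 * and loops_per_jiffy are both doubled before cyc2ns is rescaled.
 */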
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static unsigned int ref_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static unsigned long loops_per_jiffy_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static unsigned long tsc_khz_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct cpufreq_freqs *freq = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (num_online_cpus() > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) mark_tsc_unstable("cpufreq changes on SMP");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!ref_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ref_freq = freq->old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) tsc_khz_ref = tsc_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) boot_cpu_data.loops_per_jiffy =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!(freq->flags & CPUFREQ_CONST_LOOPS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) mark_tsc_unstable("cpufreq changes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static struct notifier_block time_cpufreq_notifier_block = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) .notifier_call = time_cpufreq_notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int __init cpufreq_register_tsc_scaling(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (!boot_cpu_has(X86_FEATURE_TSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) cpufreq_register_notifier(&time_cpufreq_notifier_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) CPUFREQ_TRANSITION_NOTIFIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) core_initcall(cpufreq_register_tsc_scaling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) #endif /* CONFIG_CPU_FREQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) #define ART_CPUID_LEAF (0x15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #define ART_MIN_DENOMINATOR (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * If ART is present, detect the numerator:denominator needed to convert ART to TSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static void __init detect_art(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) unsigned int unused[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 * Don't enable ART in a VM; non-stop TSC and TSC_ADJUST are required,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	 * and TSC counter resets must not occur asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) tsc_async_resets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) &art_to_tsc_numerator, unused, unused+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /* Make this sticky over multiple CPU init calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) setup_force_cpu_cap(X86_FEATURE_ART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
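/*
 * Editorial note: the values detected above feed convert_art_to_tsc() and
 * convert_art_ns_to_tsc() further below. Per Intel's documentation of CPUID
 * leaf 0x15, the relationship is roughly:
 *
 *	TSC = ART * (art_to_tsc_numerator / art_to_tsc_denominator) + offset
 *
 * where the offset comes from MSR_IA32_TSC_ADJUST read above.
 */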
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* clocksource code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static void tsc_resume(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) tsc_verify_tsc_adjust(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * We used to compare the TSC to the cycle_last value in the clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * structure to avoid a nasty time-warp. This can be observed in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * very small window right after one CPU updated cycle_last under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * is smaller than the cycle_last reference value due to a TSC which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * is slightly behind. This delta is nowhere else observable, but in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * that case it results in a forward time jump in the range of hours
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * due to the unsigned delta calculation of the time keeping core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * code, which is necessary to support wrapping clocksources like pm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  * This sanity check is now done in the core timekeeping code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * checking the result of read_tsc() - cycle_last for being negative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static u64 read_tsc(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return (u64)rdtsc_ordered();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static void tsc_cs_mark_unstable(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (tsc_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) tsc_unstable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (using_native_sched_clock())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) clear_sched_clock_stable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) disable_sched_clock_irqtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) pr_info("Marking TSC unstable due to clocksource watchdog\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static void tsc_cs_tick_stable(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (tsc_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (using_native_sched_clock())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) sched_clock_tick_stable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static int tsc_cs_enable(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) vclocks_set_used(VDSO_CLOCKMODE_TSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static struct clocksource clocksource_tsc_early = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) .name = "tsc-early",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .rating = 299,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .read = read_tsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) .mask = CLOCKSOURCE_MASK(64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) .flags = CLOCK_SOURCE_IS_CONTINUOUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) CLOCK_SOURCE_MUST_VERIFY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .enable = tsc_cs_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .resume = tsc_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .mark_unstable = tsc_cs_mark_unstable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) .tick_stable = tsc_cs_tick_stable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) .list = LIST_HEAD_INIT(clocksource_tsc_early.list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * this one will immediately take over. We will only register if TSC has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * been found good.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static struct clocksource clocksource_tsc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) .name = "tsc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) .rating = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .read = read_tsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .mask = CLOCKSOURCE_MASK(64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .flags = CLOCK_SOURCE_IS_CONTINUOUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) CLOCK_SOURCE_VALID_FOR_HRES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) CLOCK_SOURCE_MUST_VERIFY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) CLOCK_SOURCE_VERIFY_PERCPU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) .enable = tsc_cs_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .resume = tsc_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) .mark_unstable = tsc_cs_mark_unstable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .tick_stable = tsc_cs_tick_stable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) .list = LIST_HEAD_INIT(clocksource_tsc.list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) void mark_tsc_unstable(char *reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (tsc_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) tsc_unstable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (using_native_sched_clock())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) clear_sched_clock_stable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) disable_sched_clock_irqtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) pr_info("Marking TSC unstable due to %s\n", reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) clocksource_mark_unstable(&clocksource_tsc_early);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) clocksource_mark_unstable(&clocksource_tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) EXPORT_SYMBOL_GPL(mark_tsc_unstable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static void __init tsc_disable_clocksource_watchdog(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static void __init check_system_tsc_reliable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (is_geode_lx()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* RTSC counts during suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) #define RTSC_SUSP 0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) unsigned long res_low, res_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* Geode_LX - the OLPC CPU has a very reliable TSC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (res_low & RTSC_SUSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) tsc_clocksource_reliable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) tsc_clocksource_reliable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * Disable the clocksource watchdog when the system has:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * - TSC running at constant frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * - TSC which does not stop in C-States
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	 *  - the TSC_ADJUST register which allows detecting even minimal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	 *    modifications
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	 *  - not more than two sockets. As the number of sockets cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	 *    evaluated at the early boot stage where this has to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	 *    invoked, check the number of online memory nodes as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 *    fallback solution which is a reasonable estimate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) nr_online_nodes <= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) tsc_disable_clocksource_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * Make an educated guess if the TSC is trustworthy and synchronized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * over all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int unsynchronized_tsc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (apic_is_clustered_box())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (tsc_clocksource_reliable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * Intel systems are normally all synchronized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * Exceptions must mark TSC as unstable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* assume multi socket systems are not synchronized: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (num_possible_cpus() > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * Convert ART to TSC given numerator/denominator found in detect_art()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct system_counterval_t convert_art_to_tsc(u64 art)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) u64 tmp, res, rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) rem = do_div(art, art_to_tsc_denominator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) res = art * art_to_tsc_numerator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) tmp = rem * art_to_tsc_numerator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) do_div(tmp, art_to_tsc_denominator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) res += tmp + art_to_tsc_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return (struct system_counterval_t) {.cs = art_related_clocksource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) .cycles = res};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) EXPORT_SYMBOL(convert_art_to_tsc);
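/*
 * Editorial note on the split above: writing art = q * den + rem and
 * computing q * num + (rem * num) / den instead of (art * num) / den keeps
 * the intermediate products from overflowing 64 bits for large ART values,
 * at the cost of truncating the sub-cycle remainder.
 */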
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * @art_ns: ART (Always Running Timer) in unit of nanoseconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * PTM requires all timestamps to be in units of nanoseconds. When user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * software requests a cross-timestamp, this function converts system timestamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * to TSC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * that this flag is set before conversion to TSC is attempted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * struct system_counterval_t - system counter value with the pointer to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * corresponding clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * @cycles: System counter value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * @cs: Clocksource corresponding to system counter value. Used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  *			by timekeeping code to verify comparability of two cycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) u64 tmp, res, rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) rem = do_div(art_ns, USEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) res = art_ns * tsc_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) tmp = rem * tsc_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) do_div(tmp, USEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) res += tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return (struct system_counterval_t) { .cs = art_related_clocksource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .cycles = res};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) EXPORT_SYMBOL(convert_art_ns_to_tsc);
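/*
 * Unit check for the conversion above: tsc_khz is cycles per millisecond,
 * so cycles = art_ns * tsc_khz / USEC_PER_SEC. Illustrative example
 * (hypothetical frequency): with tsc_khz = 2000000 (a 2 GHz TSC),
 * art_ns = 1000000000 (one second) yields 2000000000 cycles.
 */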
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void tsc_refine_calibration_work(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * tsc_refine_calibration_work - Further refine tsc freq calibration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  * @work: ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  * This function uses delayed work over a period of one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * second to further refine the TSC freq value. Since this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * timer based, instead of loop based, we don't block the boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  * process while this longer calibration is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * If there are any calibration anomalies (too many SMIs, etc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * or the refined calibration is off by more than 1% from the fast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * early calibration, we throw out the new calibration and use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  * early calibration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void tsc_refine_calibration_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static u64 tsc_start = ULLONG_MAX, ref_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static int hpet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) u64 tsc_stop, ref_stop, delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) unsigned long freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /* Don't bother refining TSC on unstable systems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (tsc_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) goto unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * Since the work is started early in boot, we may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	 * delayed the first time we expire. So reschedule the work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	 * once we know timers are working.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (tsc_start == ULLONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Only set hpet once, to avoid mixing hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * if the hpet becomes enabled later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) hpet = is_hpet_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) tsc_start = tsc_read_refs(&ref_start, hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) schedule_delayed_work(&tsc_irqwork, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) tsc_stop = tsc_read_refs(&ref_stop, hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	/* HPET or PM timer available? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (ref_start == ref_stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	/* Check whether the sampling was disturbed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (tsc_stop == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) delta = tsc_stop - tsc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) delta *= 1000000LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (hpet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) freq = calc_hpet_ref(delta, ref_start, ref_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /* Make sure we're within 1% */
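	/* e.g. with tsc_khz = 2000000, the refined freq must lie within +/- 20000 kHz */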
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (abs(tsc_khz - freq) > tsc_khz/100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) tsc_khz = freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) (unsigned long)tsc_khz / 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) (unsigned long)tsc_khz % 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /* Inform the TSC deadline clockevent devices about the recalibration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) lapic_update_tsc_freq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /* Update the sched_clock() rate to match the clocksource one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (tsc_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) goto unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (boot_cpu_has(X86_FEATURE_ART))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) art_related_clocksource = &clocksource_tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) clocksource_register_khz(&clocksource_tsc, tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) unreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) clocksource_unregister(&clocksource_tsc_early);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static int __init init_tsc_clocksource(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (tsc_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) goto unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * When TSC frequency is known (retrieved via MSR or CPUID), we skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * the refined calibration and directly register it as a clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (boot_cpu_has(X86_FEATURE_ART))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) art_related_clocksource = &clocksource_tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) clocksource_register_khz(&clocksource_tsc, tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) unreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) clocksource_unregister(&clocksource_tsc_early);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) schedule_delayed_work(&tsc_irqwork, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  * We use device_initcall here to ensure we run after the HPET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  * is fully initialized, which may occur at fs_initcall time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) device_initcall(init_tsc_clocksource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static bool __init determine_cpu_tsc_frequencies(bool early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /* Make sure that cpu and tsc are not already calibrated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) WARN_ON(cpu_khz || tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (early) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) cpu_khz = x86_platform.calibrate_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (tsc_early_khz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) tsc_khz = tsc_early_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) tsc_khz = x86_platform.calibrate_tsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /* We should not be here with non-native cpu calibration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) cpu_khz = pit_hpet_ptimer_calibrate_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * Trust non-zero tsc_khz as authoritative,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * and use it to sanity check cpu_khz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	 * which will be off if the system timer is off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) */
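	/*
	 * Illustrative example (hypothetical values): tsc_khz = 2000000 and
	 * cpu_khz = 1750000 differ by 250000 kHz; 250000 * 10 > 2000000, i.e.
	 * more than 10%, so cpu_khz is overridden with tsc_khz below.
	 */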
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (tsc_khz == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) tsc_khz = cpu_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) cpu_khz = tsc_khz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (tsc_khz == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) pr_info("Detected %lu.%03lu MHz processor\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) (unsigned long)cpu_khz / KHZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) (unsigned long)cpu_khz % KHZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (cpu_khz != tsc_khz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		pr_info("Detected %lu.%03lu MHz TSC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) (unsigned long)tsc_khz / KHZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) (unsigned long)tsc_khz % KHZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
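/*
 * Editorial note on the derivation below: tsc_khz * KHZ is the TSC frequency
 * in Hz (cycles per second), so dividing by HZ gives cycles per jiffy.
 * Illustrative example (hypothetical values): tsc_khz = 3000000 and HZ = 250
 * give lpj = 3000000000 / 250 = 12000000.
 */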
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static unsigned long __init get_loops_per_jiffy(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) u64 lpj = (u64)tsc_khz * KHZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) do_div(lpj, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return lpj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static void __init tsc_enable_sched_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /* Sanitize TSC ADJUST before cyc2ns gets initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) tsc_store_and_check_tsc_adjust(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) cyc2ns_init_boot_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static_branch_enable(&__use_tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) void __init tsc_early_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (!boot_cpu_has(X86_FEATURE_TSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /* Don't change UV TSC multi-chassis synchronization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (is_early_uv_system())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (!determine_cpu_tsc_frequencies(true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) loops_per_jiffy = get_loops_per_jiffy();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) tsc_enable_sched_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) void __init tsc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * native_calibrate_cpu_early can only calibrate using methods that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * available early in boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) x86_platform.calibrate_cpu = native_calibrate_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (!boot_cpu_has(X86_FEATURE_TSC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (!tsc_khz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* We failed to determine frequencies earlier, try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (!determine_cpu_tsc_frequencies(false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) mark_tsc_unstable("could not calculate TSC khz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) tsc_enable_sched_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) cyc2ns_init_secondary_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (!no_sched_irq_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) enable_sched_clock_irqtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) lpj_fine = get_loops_per_jiffy();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) use_tsc_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) check_system_tsc_reliable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (unsynchronized_tsc()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) mark_tsc_unstable("TSCs unsynchronized");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (tsc_clocksource_reliable || no_tsc_watchdog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) tsc_disable_clocksource_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) detect_art();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * If we have a constant TSC and are using the TSC for the delay loop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * we can skip clock calibration if another cpu in the same socket has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * been calibrated. This assumes that CONSTANT_TSC applies to all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * cpus in the socket - this should be a safe assumption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) unsigned long calibrate_delay_is_known(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) int sibling, cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) const struct cpumask *mask = topology_core_cpumask(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!constant_tsc || !mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) sibling = cpumask_any_but(mask, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (sibling < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return cpu_data(sibling).loops_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) #endif