Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards. The listing below is kernel/sched/clock.c, the scheduler clock implementation.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * sched_clock() for unstable CPU clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 *  Updates and enhancements:
 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What this file implements:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current CPU.
 *
 * sched_clock_cpu(i)
 *
 * How it is implemented:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the clock within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
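/*
 * Usage sketch (illustrative; not part of the upstream file): a caller that
 * wants a cheap per-CPU timestamp for measuring a local interval would do
 * something like
 *
 *	u64 t0 = local_clock();
 *	do_work();
 *	u64 elapsed_ns = local_clock() - t0;
 *
 * The difference is meaningful because local_clock() is monotonic on the
 * current CPU; as the warning above says, comparing timestamps taken on
 * different CPUs can see time go backwards.
 */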
#include "sched.h"
#include <linux/sched_clock.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
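/*
 * Worked example (illustrative; not part of the upstream file): with an
 * HZ=250 build, NSEC_PER_SEC / HZ is 4,000,000, so the weak fallback above
 * advances in 4 ms steps, one step per timer tick. Subtracting
 * INITIAL_JIFFIES makes the returned value start near 0 at boot even though
 * jiffies itself is initialised close to its wrap point.
 */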

static DEFINE_STATIC_KEY_FALSE(sched_clock_running);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 */
__read_mostly u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;
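/*
 * Illustrative derivation (not part of the upstream file): rearranging the
 * invariant above gives the two assignments used later in this file,
 *
 *	__sched_clock_offset = (tick_gtod + __gtod_offset) - tick_raw
 *	__gtod_offset        = (tick_raw + __sched_clock_offset) - tick_gtod
 *
 * i.e. __set_sched_clock_stable() solves for the sched_clock() side and
 * __sched_clock_gtod_offset() solves for the ktime side, each using a
 * snapshot (tick_raw, tick_gtod) taken by __scd_stamp().
 */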

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

static void __scd_stamp(struct sched_clock_data *scd)
{
	scd->tick_gtod = ktime_get_ns();
	scd->tick_raw = sched_clock();
}

static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd;

	/*
	 * Since we're still unstable and the tick is already running, we have
	 * to disable IRQs in order to get a consistent scd->tick* reading.
	 */
	local_irq_disable();
	scd = this_scd();
	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
	local_irq_enable();

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

/*
 * If we ever get here, we're screwed, because we found out -- typically after
 * the fact -- that TSC wasn't good. This means all our clocksources (including
 * ktime) could have reported wrong values.
 *
 * What we do here is an attempt to fix up and continue sort of where we left
 * off in a coherent manner.
 *
 * The only way to fully avoid random clock jumps is to boot with:
 * "tsc=unstable".
 */
static void __sched_clock_work(struct work_struct *work)
{
	struct sched_clock_data *scd;
	int cpu;

	/* take a current timestamp and set 'now' */
	preempt_disable();
	scd = this_scd();
	__scd_stamp(scd);
	scd->clock = scd->tick_gtod + __gtod_offset;
	preempt_enable();

	/* clone to all CPUs */
	for_each_possible_cpu(cpu)
		per_cpu(sched_clock_data, cpu) = *scd;

	printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	static_branch_disable(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

static void __clear_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		return;

	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
	schedule_work(&sched_clock_work);
}

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (static_key_count(&sched_clock_running.key) == 2)
		__clear_sched_clock_stable();
}
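/*
 * Note (illustrative; not part of the upstream file): sched_clock_running is
 * bumped twice -- once in sched_clock_init() and once in
 * sched_clock_init_late() -- so the count-of-2 test above means "both init
 * stages have run"; only then is it safe (and necessary) to tear down the
 * stable state via the workqueue.
 */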

static void __sched_clock_gtod_offset(void)
{
	struct sched_clock_data *scd = this_scd();

	__scd_stamp(scd);
	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
}

void __init sched_clock_init(void)
{
	/*
	 * Set __gtod_offset such that once we mark sched_clock_running,
	 * sched_clock_tick() continues where sched_clock() left off.
	 *
	 * Even if TSC is buggered, we're still UP at this point so it
	 * can't really be out of sync.
	 */
	local_irq_disable();
	__sched_clock_gtod_offset();
	local_irq_enable();

	static_branch_inc(&sched_clock_running);
}
/*
 * We run this as late_initcall() such that it runs after all built-in drivers,
 * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
 */
static int __init sched_clock_init_late(void)
{
	static_branch_inc(&sched_clock_running);
	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();

	return 0;
}
late_initcall(sched_clock_init_late);

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
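/*
 * Worked example (illustrative; not part of the upstream file): the signed
 * subtraction handles u64 wraparound. If x has just wrapped around to 2 and
 * y == U64_MAX - 7, then x - y == 10, which as an s64 is positive, so
 * wrap_max() correctly picks x even though x < y as plain unsigned values.
 * This is the same idiom jiffies comparisons use in time_after().
 */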

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock, gtod;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	gtod = scd->tick_gtod + __gtod_offset;
	clock = gtod + delta;
	min_clock = wrap_max(gtod, old_clock);
	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
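/*
 * Worked example (illustrative; not part of the upstream file), assuming
 * HZ=250 so that TICK_NSEC is 4,000,000 ns:
 *
 *	gtod      = tick_gtod + __gtod_offset         = 1,000,000,000
 *	old_clock = 1,000,300,000
 *	delta     = 5,000,000  (sched_clock() ran a bit fast since the tick)
 *
 *	clock     = gtod + delta                      = 1,005,000,000
 *	min_clock = max(gtod, old_clock)              = 1,000,300,000
 *	max_clock = max(old_clock, gtod + TICK_NSEC)  = 1,004,000,000
 *
 * so the result is clamped down to 1,004,000,000: the clock never runs
 * backwards past old_clock and never gets more than one tick ahead of the
 * GTOD base (unless old_clock already was), which is exactly the window
 * described in the comment above.
 */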

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32-bit kernels as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32-bit, otherwise the
	 * update on the remote CPU can hit in between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64-bit kernels the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32-bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both locks
	 * taken to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + __sched_clock_offset;

	if (!static_branch_likely(&sched_clock_running))
		return sched_clock();

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	if (sched_clock_stable())
		return;

	if (!static_branch_likely(&sched_clock_running))
		return;

	lockdep_assert_irqs_disabled();

	scd = this_scd();
	__scd_stamp(scd);
	sched_clock_local(scd);
}

void sched_clock_tick_stable(void)
{
	if (!sched_clock_stable())
		return;

	/*
	 * Called under watchdog_lock.
	 *
	 * The watchdog just found this TSC to (still) be stable, so now is a
	 * good moment to update our __gtod_offset. Because once we find the
	 * TSC to be unstable, any computation will be computing crap.
	 */
	local_irq_disable();
	__sched_clock_gtod_offset();
	local_irq_enable();
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled; resync with ktime.
 */
void sched_clock_idle_wakeup_event(void)
{
	unsigned long flags;

	if (sched_clock_stable())
		return;

	if (unlikely(timekeeping_suspended))
		return;

	local_irq_save(flags);
	sched_clock_tick();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void __init sched_clock_init(void)
{
	static_branch_inc(&sched_clock_running);
	local_irq_disable();
	generic_sched_clock_init();
	local_irq_enable();
}

u64 sched_clock_cpu(int cpu)
{
	if (!static_branch_likely(&sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}