Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

kernel/time/clocksource.c, every line as of commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):
// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
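
/*
 * Worked example (illustrative only, not part of the original source):
 * for a hypothetical 24 MHz counter converted to nanoseconds
 * (@from = 24000000, @to = NSEC_PER_SEC) with @maxsec = 600, sftacc
 * works out to 30 and the loop above settles on shift = 24 with
 * mult = 699050667, because (1000000000 << 24) / 24000000 rounds to
 * 699050667, the largest candidate below 2^30. One second of cycles
 * then converts back as (24000000 * 699050667) >> 24 ~= 1000000000 ns,
 * i.e. ns = (cycles * mult) >> shift.
 */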

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions.
 */
#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC)
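
/*
 * Example arithmetic for the constants above (illustrative only):
 * WATCHDOG_INTERVAL is half a second worth of jiffies regardless of HZ,
 * WATCHDOG_THRESHOLD is 2^-4 s = 62500000 ns, and WATCHDOG_MAX_SKEW is
 * 100000 ns. A clocksource running ~14% fast gains roughly 70 ms on the
 * watchdog per 0.5 s interval, exceeding the 62.5 ms threshold and
 * getting marked unstable, while a single SMI or VCPU preemption that
 * stretches the paired watchdog reads past 100 us only causes the
 * readout to be retried.
 */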

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails, the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered, clocksource_watchdog_kthread()
	 * will re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static ulong max_cswd_read_retries = 3;
module_param(max_cswd_read_retries, ulong, 0644);

static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	unsigned int nretries;
	u64 wd_end, wd_delta;
	int64_t wd_delay;

	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		local_irq_enable();

		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
					      watchdog->shift);
		if (wd_delay <= WATCHDOG_MAX_SKEW) {
			if (nretries > 1 || nretries >= max_cswd_read_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return true;
		}
	}

	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
		smp_processor_id(), watchdog->name, wd_delay, nretries);
	return false;
}
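
/*
 * Sketch of the sandwich read above (explanatory, not from the original
 * source): the two watchdog reads bound the cs->read() in between, so
 * when wd_end - *wdnow converts to more than WATCHDOG_MAX_SKEW (100 us),
 * *csnow may have been taken arbitrarily late within that window and
 * cannot be trusted for the skew comparison. The sandwich is retried up
 * to max_cswd_read_retries times before the clocksource is given up as
 * unreliable and the caller marks it unstable.
 */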

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;

static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}

static void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	preempt_disable();
	testcpu = smp_processor_id();
	pr_warn("Checking clocksource %s synchronization from CPU %d.\n", cs->name, testcpu);
	for_each_online_cpu(cpu) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (csnow_end - csnow_mid) & cs->mask;
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
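
/*
 * Example of the sign test above (illustrative only): with a full
 * 64-bit mask, if the remote CPU's csnow_mid read is numerically below
 * the test CPU's csnow_begin, then (csnow_mid - csnow_begin) & cs->mask
 * wraps to a value with the top bit set, the s64 conversion makes delta
 * negative, and the CPU lands in cpus_behind; the symmetric
 * csnow_end - csnow_mid check catches CPUs whose counter runs ahead.
 */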

static void clocksource_watchdog(struct timer_list *unused)
{
	u64 csnow, wdnow, cslast, wdlast, delta;
	int next_cpu, reset_pending;
	int64_t wd_nsec, cs_nsec;
	struct clocksource *cs;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * Only clear watchdog_reset_pending once we have done a full
	 * cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm the timer if it is not already pending: this could race with a
	 * concurrent clocksource_stop_watchdog() / clocksource_start_watchdog()
	 * pair.
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback, restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog, we need to reset the cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
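
/*
 * For reference (from the struct clocksource documentation in
 * include/linux/clocksource.h, not this file): ratings 1-99 are unfit
 * for real use, 100-199 base level, 200-299 good, 300-399 desired and
 * 400-499 perfect. The loop above therefore picks the highest-rated
 * registered clocksource that does not itself require verification to
 * act as the watchdog.
 */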

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip any clocksource which will be stopped in the suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * A nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces that would stop it while the system is suspended.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	whether to select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle value of the suspend timer to
 * calculate the suspend time when resuming the system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen and non-boot CPUs and interrupts are disabled
 * by now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid a duplicate read
	 * of the suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, or 0 if there is no usable
 * suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as the current cycle to
	 * avoid a duplicate read of the suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if the current clocksource
	 * is not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
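
/*
 * Worked example (illustrative only, reusing the hypothetical 24 MHz
 * suspend clocksource with mult = 699050667 and shift = 24 from the
 * clocks_calc_mult_shift() example above): a 30 s suspend advances the
 * counter by delta = 720000000 cycles, and
 * mul_u64_u32_shr(720000000, 699050667, 24) yields ~30000000000 ns,
 * i.e. 30 s, which timekeeping then injects as the suspend time.
 */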
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699)  * clocksource_suspend - suspend the clocksource(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) void clocksource_suspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	struct clocksource *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	list_for_each_entry_reverse(cs, &clocksource_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		if (cs->suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			cs->suspend(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711)  * clocksource_resume - resume the clocksource(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) void clocksource_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	struct clocksource *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	list_for_each_entry(cs, &clocksource_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		if (cs->resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			cs->resume(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	clocksource_resume_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725)  * clocksource_touch_watchdog - Update watchdog
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727)  * Update the watchdog after exception contexts such as kgdb so as not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728)  * to incorrectly trip the watchdog. This might fail when the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729)  * was stopped in code which holds watchdog_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) void clocksource_touch_watchdog(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	clocksource_resume_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  * clocksource_max_adjustment- Returns max adjustment amount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  * @cs:         Pointer to clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) static u32 clocksource_max_adjustment(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	u64 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	ret = (u64)cs->mult * 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	do_div(ret,100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	return (u32)ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) }
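To make the 11% bound concrete, a worked example with a hypothetical
mult value:

/*
 * With cs->mult == 0x01000000 (16777216):
 *
 *	maxadj = 16777216 * 11 / 100 = 1845493
 *
 * i.e. roughly 110,000 ppm of headroom before mult +/- maxadj is fed
 * into the overflow checks further down.
 */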
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  * @mult:	cycle to nanosecond multiplier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * @shift:	cycle to nanosecond divisor (power of two)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * @maxadj:	maximum adjustment value to mult (~11%)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * @mask:	bitmask for two's complement subtraction of non 64 bit counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * @max_cyc:	maximum cycle value before potential overflow (does not include
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  *		any safety margin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * NOTE: This function includes a safety margin of 50%; in other words, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  * return half the number of nanoseconds the hardware counter can technically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * cover. This is done so that we can potentially detect problems caused by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  * delayed timers or bad hardware, which might result in time intervals that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  * are larger than what the math used can handle without overflows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	u64 max_nsecs, max_cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	 * Calculate the maximum number of cycles that we can pass to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	 * cyc2ns() function without overflowing a 64-bit result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	max_cycles = ULLONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	do_div(max_cycles, mult + maxadj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	 * The actual maximum number of cycles we can defer the clocksource is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	 * determined by the minimum of max_cycles and mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	 * Note: Here we subtract the maxadj to make sure we don't sleep for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	 * too long if there's a large negative adjustment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	max_cycles = min(max_cycles, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	/* return the max_cycles value as well if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (max_cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		*max_cyc = max_cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/* Return 50% of the actual maximum, so we can detect bad values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	max_nsecs >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	return max_nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) }
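A worked pass through this math, using hypothetical values (a 32-bit
counter with mult = 0x01000000, shift = 24 and maxadj = 1845493 from
the 11% rule above):

/*
 * ULLONG_MAX / (mult + maxadj) ~= 9.9e11 cycles, but the 32-bit mask
 * (0xffffffff ~= 4.29e9) is smaller, so it limits max_cycles:
 *
 *	max_nsecs = (0xffffffff * (mult - maxadj)) >> 24 ~= 3.82e9 ns
 *	returned  = max_nsecs >> 1                       ~= 1.91e9 ns
 *
 * so such a clocksource could defer updates for roughly 1.9 seconds.
 */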
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  * @cs:         Pointer to clocksource to be updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) static inline void clocksource_update_max_deferment(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 						cs->maxadj, cs->mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 						&cs->max_cycles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	struct clocksource *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (!finished_booting || list_empty(&clocksource_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 * We pick the clocksource with the highest rating. If oneshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 * mode is active, we pick the highres valid clocksource with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 * the best rating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	list_for_each_entry(cs, &clocksource_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		if (skipcur && cs == curr_clocksource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		return cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static void __clocksource_select(bool skipcur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	bool oneshot = tick_oneshot_mode_active();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct clocksource *best, *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	/* Find the best suitable clocksource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	best = clocksource_find_best(oneshot, skipcur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (!best)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (!strlen(override_name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	/* Check for the override clocksource. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	list_for_each_entry(cs, &clocksource_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		if (skipcur && cs == curr_clocksource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		if (strcmp(cs->name, override_name) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		 * Check to make sure we don't switch to a non-highres
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		 * capable clocksource if the tick code is in oneshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		 * mode (highres or nohz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			/* Override clocksource cannot be used. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 					cs->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 				override_name[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 				 * The override cannot currently be verified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 				 * Defer the switch and let the watchdog check it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 					cs->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			/* Override clocksource can be used. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			best = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (curr_clocksource != best && !timekeeping_notify(best)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		pr_info("Switched to clocksource %s\n", best->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		curr_clocksource = best;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885)  * clocksource_select - Select the best clocksource available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887)  * Private function. Must hold clocksource_mutex when called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889)  * Select the clocksource with the best rating, or the clocksource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890)  * which is selected by userspace override.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) static void clocksource_select(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	__clocksource_select(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) static void clocksource_select_fallback(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	__clocksource_select(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) static inline void clocksource_select(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) static inline void clocksource_select_fallback(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * clocksource_done_booting - Called near the end of core bootup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * Hack to avoid lots of clocksource churn at boot time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  * We use fs_initcall because we want this to start before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  * device_initcall but after subsys_initcall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) static int __init clocksource_done_booting(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	curr_clocksource = clocksource_default_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	finished_booting = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	 * Run the watchdog first to eliminate unstable clock sources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	__clocksource_watchdog_kthread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	clocksource_select();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) fs_initcall(clocksource_done_booting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * Enqueue the clocksource sorted by rating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) static void clocksource_enqueue(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	struct list_head *entry = &clocksource_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct clocksource *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	list_for_each_entry(tmp, &clocksource_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		/* Keep track of the place where to insert */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		if (tmp->rating < cs->rating)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		entry = &tmp->list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	list_add(&cs->list, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) }
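A quick trace of the insertion, with illustrative ratings:

/*
 * list before:	400 -> 300 -> 100
 * enqueue 250:	walks past 400 and 300, breaks at 100
 * list after:	400 -> 300 -> 250 -> 100
 *
 * An equal rating does not satisfy tmp->rating < cs->rating, so peers
 * keep their registration order and the new entry goes behind them.
 */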
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  * __clocksource_update_freq_scale - Update the clocksource with a new frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  * @cs:		clocksource to be registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  * @scale:	Scale factor multiplied against freq to get clocksource hz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  * @freq:	clocksource frequency (cycles per second) divided by scale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  * This should only be called from the clocksource->enable() method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  * This *SHOULD NOT* be called directly! Please use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  * functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	u64 sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	 * Default clocksources are *special* and self-define their mult/shift.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	 * But, you're not special, so you should specify a freq value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		 * Calculate the maximum number of seconds we can run before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		 * wrapping around. For clocksources which have a mask > 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		 * we need to limit the max sleep time to have a good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		 * conversion precision. 10 minutes is still a reasonable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		 * amount. That results in a shift value of 24 for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		 * ~ 0.06ppm granularity for NTP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		sec = cs->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		do_div(sec, freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		do_div(sec, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		if (!sec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			sec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		else if (sec > 600 && cs->mask > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			sec = 600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				       NSEC_PER_SEC / scale, sec * scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	 * Ensure clocksources that have large 'mult' values don't overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	 * when adjusted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	cs->maxadj = clocksource_max_adjustment(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	while (freq && ((cs->mult + cs->maxadj < cs->mult)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		|| (cs->mult - cs->maxadj > cs->mult))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		cs->mult >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		cs->shift--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		cs->maxadj = clocksource_max_adjustment(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 * Only warn for *special* clocksources that self-define
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * their mult/shift values and don't specify a freq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		cs->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	clocksource_update_max_deferment(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
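As the kernel-doc above says, drivers reach this through the wrappers;
a hypothetical ->enable() implementation using one of them:

static int example_cs_enable(struct clocksource *cs)
{
	u32 hz = 24000000;	/* a real driver would re-read its rate */

	/* wrapper for __clocksource_update_freq_scale(cs, 1, hz) */
	__clocksource_update_freq_hz(cs, hz);
	return 0;
}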
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * __clocksource_register_scale - Used to install new clocksources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * @cs:		clocksource to be registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  * @scale:	Scale factor multiplied against freq to get clocksource hz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * @freq:	clocksource frequency (cycles per second) divided by scale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  * Returns -EBUSY if registration fails, zero otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  * This *SHOULD NOT* be called directly! Please use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * clocksource_register_hz() or clocksource_register_khz() helper functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	clocksource_arch_init(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (cs->vdso_clock_mode < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			cs->name, cs->vdso_clock_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/* Initialize mult/shift and max_idle_ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	__clocksource_update_freq_scale(cs, scale, freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	/* Add clocksource to the clocksource list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	clocksource_watchdog_lock(&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	clocksource_enqueue(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	clocksource_enqueue_watchdog(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	clocksource_watchdog_unlock(&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	clocksource_select();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	clocksource_select_watchdog(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	__clocksource_suspend_select(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) EXPORT_SYMBOL_GPL(__clocksource_register_scale);
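Continuing the hypothetical driver from earlier: registering the
example source at 24 MHz goes through the clocksource_register_hz()
helper named above:

static int __init example_timer_init(void)
{
	/* equivalent to __clocksource_register_scale(&example_cs, 1, 24000000) */
	return clocksource_register_hz(&example_cs, 24000000);
}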
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static void __clocksource_change_rating(struct clocksource *cs, int rating)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	list_del(&cs->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	cs->rating = rating;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	clocksource_enqueue(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)  * clocksource_change_rating - Change the rating of a registered clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)  * @cs:		clocksource to be changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)  * @rating:	new rating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) void clocksource_change_rating(struct clocksource *cs, int rating)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	clocksource_watchdog_lock(&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	__clocksource_change_rating(cs, rating);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	clocksource_watchdog_unlock(&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	clocksource_select();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	clocksource_select_watchdog(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	clocksource_suspend_select(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) EXPORT_SYMBOL(clocksource_change_rating);
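One (hypothetical) use is demoting a source discovered to be less
trustworthy at runtime; the call re-sorts the list and re-runs the
selection logic:

static void example_demote(void)
{
	/* may cause curr_clocksource to switch away from example_cs */
	clocksource_change_rating(&example_cs, 10);
}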
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  * Unbind clocksource @cs. Called with clocksource_mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static int clocksource_unbind(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	if (clocksource_is_watchdog(cs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		/* Select and try to install a replacement watchdog. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		clocksource_select_watchdog(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		if (clocksource_is_watchdog(cs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (cs == curr_clocksource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		/* Select and try to install a replacement clock source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		clocksource_select_fallback();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		if (curr_clocksource == cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (clocksource_is_suspend(cs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		 * Select and try to install a replacement suspend clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		 * If no replacement suspend clocksource, we will just let the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		 * clocksource go and have no suspend clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		clocksource_suspend_select(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	clocksource_watchdog_lock(&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	clocksource_dequeue_watchdog(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	list_del_init(&cs->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	clocksource_watchdog_unlock(&flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  * clocksource_unregister - remove a registered clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  * @cs:	clocksource to be unregistered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) int clocksource_unregister(struct clocksource *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (!list_empty(&cs->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		ret = clocksource_unbind(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) EXPORT_SYMBOL(clocksource_unregister);
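Completing the hypothetical driver's lifecycle, teardown is a single
call that only fails if no replacement can take over:

static void __exit example_timer_exit(void)
{
	/* clocksource_unbind() returns -EBUSY if example_cs is irreplaceable */
	if (clocksource_unregister(&example_cs))
		pr_warn("example clocksource still in use\n");
}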
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * current_clocksource_show - sysfs interface for current clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * @dev:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  * @attr:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * @buf:	char buffer to be filled with the current clocksource name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  * Provides the sysfs interface for showing the current clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static ssize_t current_clocksource_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 					struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 					char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	ssize_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	size_t ret = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	/* strings from sysfs write are not 0 terminated! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (!cnt || cnt >= CS_NAME_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	/* strip off \n: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (buf[cnt-1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if (cnt > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		memcpy(dst, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	dst[cnt] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
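Tracing a typical sysfs write through this helper (inputs hypothetical):

/*
 *	sysfs_get_uname("tsc\n", dst, 4)  -> dst = "tsc", returns 4
 *	sysfs_get_uname("", dst, 0)       -> -EINVAL
 *
 * The full count is returned even though the '\n' is stripped, so the
 * caller can report the whole write as consumed.
 */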
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  * current_clocksource_store - interface for manually overriding clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  * @dev:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  * @attr:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * @buf:	name of override clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * @count:	length of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * Takes input from sysfs interface for manually overriding the default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  * clocksource selection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static ssize_t current_clocksource_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 					 struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 					 const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	ret = sysfs_get_uname(buf, override_name, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		clocksource_select();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static DEVICE_ATTR_RW(current_clocksource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  * unbind_clocksource_store - interface for manually unbinding clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  * @dev:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  * @attr:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  * @buf:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * @count:	length of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  * Takes input from sysfs interface for manually unbinding a clocksource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static ssize_t unbind_clocksource_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 					struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 					const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct clocksource *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	char name[CS_NAME_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	ret = sysfs_get_uname(buf, name, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	list_for_each_entry(cs, &clocksource_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		if (strcmp(cs->name, name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		ret = clocksource_unbind(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static DEVICE_ATTR_WO(unbind_clocksource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * available_clocksource_show - sysfs interface for listing clocksources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  * @dev:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * @attr:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * @buf:	char buffer to be filled with clocksource list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  * Provides sysfs interface for listing registered clocksources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static ssize_t available_clocksource_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 					  struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 					  char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	struct clocksource *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	ssize_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	list_for_each_entry(src, &clocksource_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		 * Don't show non-HRES clocksource if the tick code is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		 * in one shot mode (highres=on or nohz=on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		if (!tick_oneshot_mode_active() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			count += snprintf(buf + count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 				  "%s ", src->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	count += snprintf(buf + count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static DEVICE_ATTR_RO(available_clocksource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static struct attribute *clocksource_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	&dev_attr_current_clocksource.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	&dev_attr_unbind_clocksource.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	&dev_attr_available_clocksource.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) ATTRIBUTE_GROUPS(clocksource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) static struct bus_type clocksource_subsys = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	.name = "clocksource",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	.dev_name = "clocksource",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static struct device device_clocksource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	.id	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	.bus	= &clocksource_subsys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	.groups	= clocksource_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) static int __init init_clocksource_sysfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	int error = subsys_system_register(&clocksource_subsys, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		error = device_register(&device_clocksource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) device_initcall(init_clocksource_sysfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) #endif /* CONFIG_SYSFS */
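Taken together, init_clocksource_sysfs() exposes these attributes as
current_clocksource, unbind_clocksource and available_clocksource under
/sys/devices/system/clocksource/clocksource0/; writing a registered
name (for example "hpet" on x86 machines that provide it) into
current_clocksource funnels through current_clocksource_store() above.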
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  * boot_override_clocksource - boot clock override
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  * @str:	override name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  * Takes a clocksource= boot argument and uses it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)  * as the clocksource override name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static int __init boot_override_clocksource(char* str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	mutex_lock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	if (str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		strlcpy(override_name, str, sizeof(override_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	mutex_unlock(&clocksource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) __setup("clocksource=", boot_override_clocksource);
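For example, booting with clocksource=hpet pre-loads override_name, so
clocksource_select() will prefer the HPET once it registers, exactly as
if the name had been written via sysfs at runtime.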
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  * boot_override_clock - Compatibility layer for deprecated boot option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  * @str:	override name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  * DEPRECATED! Takes a clock= boot argument and uses it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * as the clocksource override name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static int __init boot_override_clock(char* str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (!strcmp(str, "pmtmr")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		return boot_override_clocksource("acpi_pm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return boot_override_clocksource(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) __setup("clock=", boot_override_clock);