^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * High-resolution kernel timers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * In contrast to the low-resolution timeout API, aka timer wheel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * hrtimers provide finer resolution and accuracy depending on system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * configuration and capabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Started by: Thomas Gleixner and Ingo Molnar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * Credits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Based on the original timer wheel code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * Help, testing, suggestions, bugfixes, improvements were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * provided by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 * et al.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/hrtimer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/tick.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/debugobjects.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/sched/sysctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/sched/rt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/sched/deadline.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/sched/nohz.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/freezer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <trace/events/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include "tick-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * Masks for selecting the soft and hard context timers from
 * cpu_base->active_bases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
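/*
 * The clock_base array below lists the four hard bases before the four
 * soft ones, so MASK_SHIFT is 4: HRTIMER_ACTIVE_HARD is 0x0f,
 * HRTIMER_ACTIVE_SOFT is 0xf0 and HRTIMER_ACTIVE_ALL is 0xff.
 */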
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * The timer bases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * There are more clockids than hrtimer bases. Thus, we index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * into the timer bases by the hrtimer_base_type enum. When trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * to reach a base using a clockid, hrtimer_clockid_to_base()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * is used to convert from clockid to the proper hrtimer_base_type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) .clock_base =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) .index = HRTIMER_BASE_MONOTONIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) .clockid = CLOCK_MONOTONIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) .get_time = &ktime_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) .index = HRTIMER_BASE_REALTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) .clockid = CLOCK_REALTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) .get_time = &ktime_get_real,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) .index = HRTIMER_BASE_BOOTTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) .clockid = CLOCK_BOOTTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) .get_time = &ktime_get_boottime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) .index = HRTIMER_BASE_TAI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) .clockid = CLOCK_TAI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) .get_time = &ktime_get_clocktai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) .index = HRTIMER_BASE_MONOTONIC_SOFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) .clockid = CLOCK_MONOTONIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) .get_time = &ktime_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) .index = HRTIMER_BASE_REALTIME_SOFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) .clockid = CLOCK_REALTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) .get_time = &ktime_get_real,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) .index = HRTIMER_BASE_BOOTTIME_SOFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) .clockid = CLOCK_BOOTTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) .get_time = &ktime_get_boottime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) .index = HRTIMER_BASE_TAI_SOFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) .clockid = CLOCK_TAI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) .get_time = &ktime_get_clocktai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
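/*
 * The [0 ... MAX_CLOCKS - 1] range designator pre-fills every slot with the
 * out-of-range value HRTIMER_MAX_CLOCK_BASES; the explicit entries below
 * then override the four clockids that hrtimers support directly.
 */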
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /* Make sure we catch unsupported clockids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) [CLOCK_TAI] = HRTIMER_BASE_TAI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * Functions and macros which are different for UP/SMP systems are kept in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * single place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * such that hrtimer_callback_running() can unconditionally dereference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * timer->base->cpu_base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) static struct hrtimer_cpu_base migration_cpu_base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) .clock_base = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) .cpu_base = &migration_cpu_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) &migration_cpu_base.lock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #define migration_base migration_cpu_base.clock_base[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) static inline bool is_migration_base(struct hrtimer_clock_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) return base == &migration_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /*
 * We are using per-CPU base locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_hrtimer()/migrate_hrtimer_list() can safely modify all timers
 * which could be found on the timer queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * When the timer's base is locked, and the timer removed from list, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * possible to set timer->base = &migration_base and drop the lock: the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * remains locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct hrtimer_clock_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) base = READ_ONCE(timer->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) if (likely(base != &migration_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (likely(base == timer->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /* The timer has migrated to another CPU: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * We do not migrate the timer when it is expiring before the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * event on the target cpu. When high resolution is enabled, we cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * reprogram the target cpu hardware and we would cause it to fire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * late. To keep it simple, we handle the high resolution enabled and
 * disabled cases the same way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * Called with cpu_base->lock of target cpu held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) ktime_t expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return expires < new_base->cpu_base->expires_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
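/*
 * Select the cpu_base a timer should be queued on. With NO_HZ and timer
 * migration enabled, unpinned timers are steered to the CPU chosen by
 * get_nohz_timer_target() (normally a busy, non-isolated CPU) instead of
 * waking an idle CPU; otherwise the current CPU's base is used.
 */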
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) int pinned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (static_branch_likely(&timers_migration_enabled) && !pinned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) return &per_cpu(hrtimer_bases, get_nohz_timer_target());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * We switch the timer base to a power-optimized selected CPU target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * - NO_HZ_COMMON is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * - timer migration is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * - the timer callback is not running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * - the timer is not the first expiring timer on the new target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * If one of the above requirements is not fulfilled we move the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * to the current CPU or leave it on the previously assigned CPU if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * the timer callback is currently running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) static inline struct hrtimer_clock_base *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int pinned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) struct hrtimer_clock_base *new_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) int basenum = base->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) this_cpu_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) new_cpu_base = get_target_base(this_cpu_base, pinned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) new_base = &new_cpu_base->clock_base[basenum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) if (base != new_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * We are trying to move timer to new_base.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * However we can't change timer's base while it is running,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * so we keep it on the same CPU. No hassle vs. reprogramming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * the event source in the high resolution case. The softirq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * code will take care of this when the timer function has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * completed. There is no conflict as we hold the lock until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * the timer is enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (unlikely(hrtimer_callback_running(timer)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) /* See the comment in lock_hrtimer_base() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) WRITE_ONCE(timer->base, &migration_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) raw_spin_unlock(&base->cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) raw_spin_lock(&new_base->cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (new_cpu_base != this_cpu_base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) hrtimer_check_target(timer, new_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) raw_spin_unlock(&new_base->cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) raw_spin_lock(&base->cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) new_cpu_base = this_cpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) WRITE_ONCE(timer->base, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) WRITE_ONCE(timer->base, new_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) if (new_cpu_base != this_cpu_base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) hrtimer_check_target(timer, new_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) new_cpu_base = this_cpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) return new_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) #else /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static inline bool is_migration_base(struct hrtimer_clock_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static inline struct hrtimer_clock_base *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct hrtimer_clock_base *base = timer->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) # define switch_hrtimer_base(t, b, p) (b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) #endif /* !CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) /*
 * Functions for the scalar (s64) storage format of ktime_t which are
 * too large for inlining:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) #if BITS_PER_LONG < 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * Divide a ktime value by a nanosecond value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) s64 __ktime_divns(const ktime_t kt, s64 div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) int sft = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) s64 dclc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) u64 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) dclc = ktime_to_ns(kt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) tmp = dclc < 0 ? -dclc : dclc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* Make sure the divisor is less than 2^32: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) while (div >> 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) sft++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) div >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
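	/*
	 * Dividend and divisor have both been shifted right by 'sft' bits,
	 * so for divisors that do not fit in 32 bits the do_div() below
	 * computes an approximation of the exact quotient.
	 */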
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) tmp >>= sft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) do_div(tmp, (u32) div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) return dclc < 0 ? -tmp : tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) * Add two ktime values and do a safety check for overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) ktime_t res = ktime_add_unsafe(lhs, rhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * We use KTIME_SEC_MAX here, the maximum timeout which we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * return to user space in a timespec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) if (res < 0 || res < lhs || res < rhs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) res = ktime_set(KTIME_SEC_MAX, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) return res;
}
EXPORT_SYMBOL_GPL(ktime_add_safe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) static const struct debug_obj_descr hrtimer_debug_descr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
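/*
 * debugobjects uses this hint in its reports; for an hrtimer the callback
 * function is the most useful way to identify the offending object.
 */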
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static void *hrtimer_debug_hint(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) return ((struct hrtimer *) addr)->function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * fixup_init is called when:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * - an active object is initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) struct hrtimer *timer = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) case ODEBUG_STATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) hrtimer_cancel(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) debug_object_init(timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * fixup_activate is called when:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * - an active object is activated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * - an unknown non-static object is activated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) case ODEBUG_STATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * fixup_free is called when:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * - an active object is freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) struct hrtimer *timer = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) case ODEBUG_STATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) hrtimer_cancel(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) debug_object_free(timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) static const struct debug_obj_descr hrtimer_debug_descr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) .name = "hrtimer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) .debug_hint = hrtimer_debug_hint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) .fixup_init = hrtimer_fixup_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) .fixup_activate = hrtimer_fixup_activate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) .fixup_free = hrtimer_fixup_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) static inline void debug_hrtimer_init(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) debug_object_init(timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) static inline void debug_hrtimer_activate(struct hrtimer *timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) debug_object_activate(timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) debug_object_deactivate(timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) enum hrtimer_mode mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
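/*
 * On-stack timers must be announced to debugobjects explicitly. The caller
 * is expected to pair this with destroy_hrtimer_on_stack() before the
 * stack frame goes away.
 */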
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) debug_object_init_on_stack(timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) __hrtimer_init(timer, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) clockid_t clock_id, enum hrtimer_mode mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) clockid_t clock_id, enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) __hrtimer_init_sleeper(sl, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) void destroy_hrtimer_on_stack(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) debug_object_free(timer, &hrtimer_debug_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) static inline void debug_hrtimer_init(struct hrtimer *timer) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) static inline void debug_hrtimer_activate(struct hrtimer *timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) enum hrtimer_mode mode) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
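/*
 * The wrappers below pair the (conditionally compiled) debugobjects hooks
 * with the corresponding hrtimer tracepoints so that both fire at the same
 * places.
 */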
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) debug_init(struct hrtimer *timer, clockid_t clockid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) debug_hrtimer_init(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) trace_hrtimer_init(timer, clockid, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) static inline void debug_activate(struct hrtimer *timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) debug_hrtimer_activate(timer, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) trace_hrtimer_start(timer, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) static inline void debug_deactivate(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) debug_hrtimer_deactivate(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) trace_hrtimer_cancel(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
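/*
 * Return the clock base for the lowest set bit in *active and clear that
 * bit, so that for_each_active_base() visits every active base exactly
 * once, in ascending index order.
 */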
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) static struct hrtimer_clock_base *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (!*active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) idx = __ffs(*active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) *active &= ~(1U << idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return &cpu_base->clock_base[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) #define for_each_active_base(base, cpu_base, active) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) while ((base = __next_base((cpu_base), &(active))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
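/*
 * Scan the clock bases selected by @active and return the earliest expiry,
 * no later than @expires_next, with each base's offset already subtracted.
 * When @exclude is non-NULL, that timer is skipped and the cpu_base
 * next-timer pointers are left untouched.
 */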
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) const struct hrtimer *exclude,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) unsigned int active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ktime_t expires_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) struct hrtimer_clock_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) ktime_t expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) for_each_active_base(base, cpu_base, active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) struct timerqueue_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct hrtimer *timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) next = timerqueue_getnext(&base->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) timer = container_of(next, struct hrtimer, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) if (timer == exclude) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /* Get to the next timer in the queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) next = timerqueue_iterate_next(next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (!next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) timer = container_of(next, struct hrtimer, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (expires < expires_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) expires_next = expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /* Skip cpu_base update if a timer is being excluded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) if (exclude)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (timer->is_soft)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) cpu_base->softirq_next_timer = timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) cpu_base->next_timer = timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * clock_was_set() might have changed base->offset of any of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * the clock bases so the result might be negative. Fix it up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * to prevent a false positive in clockevents_program_event().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (expires_next < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) expires_next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) return expires_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) /*
 * Recomputes cpu_base::*next_timer and returns the earliest expires_next,
 * but does not set cpu_base::*expires_next; that is done only by
 * hrtimer[_force]_reprogram() and hrtimer_interrupt(). Updating
 * cpu_base::*expires_next right away would break the reprogramming logic.
 *
 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases:
 * those timers will be run when the softirq is handled, and at the end of
 * hrtimer_run_softirq() hrtimer_update_softirq_timer() re-evaluates these
 * bases.
 *
 * Therefore the softirq values are those from the HRTIMER_ACTIVE_SOFT clock
 * bases. The !softirq values are the minima across HRTIMER_ACTIVE_ALL,
 * unless an actual softirq is pending, in which case they are the minima
 * of HRTIMER_ACTIVE_HARD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) * @active_mask must be one of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * - HRTIMER_ACTIVE_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * - HRTIMER_ACTIVE_SOFT, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) * - HRTIMER_ACTIVE_HARD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) static ktime_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) unsigned int active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) struct hrtimer *next_timer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) ktime_t expires_next = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) cpu_base->softirq_next_timer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) expires_next = __hrtimer_next_event_base(cpu_base, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) active, KTIME_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) next_timer = cpu_base->softirq_next_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if (active_mask & HRTIMER_ACTIVE_HARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) cpu_base->next_timer = next_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) expires_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return expires_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
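/*
 * Compute the next expiry for the hard interrupt, folding in the soft
 * bases as long as no softirq is pending. If the first soft timer expires
 * before the first hard timer, the hardware has to be programmed to the
 * soft expiry so that the softirq can be raised in time.
 */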
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) ktime_t expires_next, soft = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * If the soft interrupt has already been activated, ignore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * soft bases. They will be handled in the already raised soft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (!cpu_base->softirq_activated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) * Update the soft expiry time. clock_settime() might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) * affected it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) cpu_base->softirq_expires_next = soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * If a softirq timer is expiring first, update cpu_base->next_timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * and program the hardware with the soft expiry time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (expires_next > soft) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) cpu_base->next_timer = cpu_base->softirq_next_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) expires_next = soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return expires_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
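/*
 * Refresh the realtime, boottime and TAI offsets from the timekeeping core
 * and mirror them into the corresponding soft bases. Returns the current
 * CLOCK_MONOTONIC time.
 */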
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) offs_real, offs_boot, offs_tai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) /*
 * Is the high resolution mode active?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) cpu_base->hres_active : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) static inline int hrtimer_hres_active(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /*
 * Reprogram the event source, checking all clock bases for the
 * next expiring timer.
 * Called with interrupts disabled and base->lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) ktime_t expires_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) expires_next = hrtimer_update_next_event(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (skip_equal && expires_next == cpu_base->expires_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) cpu_base->expires_next = expires_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) * If hres is not active, hardware does not have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) * reprogrammed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * If a hang was detected in the last timer interrupt then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * leave the hang delay active in the hardware. We want the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * system to make progress. That also prevents the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * scenario:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * T1 expires 50ms from now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * T2 expires 5s from now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * T1 is removed, so this code is called and would reprogram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * the hardware to 5s from now. Any hrtimer_start after that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * will not reprogram the hardware due to hang_detected being
 * set. So we'd effectively block all timers until the T2 event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * fires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) tick_program_event(cpu_base->expires_next, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /* High resolution timer related functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) #ifdef CONFIG_HIGH_RES_TIMERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) /*
 * High resolution timer enabled?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static bool hrtimer_hres_enabled __read_mostly = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) EXPORT_SYMBOL_GPL(hrtimer_resolution);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * Enable / Disable high resolution mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static int __init setup_hrtimer_hres(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) __setup("highres=", setup_hrtimer_hres);
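/*
 * For example, booting with "highres=off" keeps the system in low
 * resolution mode even if the clock event hardware could support the
 * switch.
 */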
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /*
 * hrtimer_is_hres_enabled - query whether high resolution mode is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static inline int hrtimer_is_hres_enabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return hrtimer_hres_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) /*
 * retrigger_next_event() is called after the clock was set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * Called with interrupts disabled via on_each_cpu()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) static void retrigger_next_event(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (!__hrtimer_hres_active(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) raw_spin_lock(&base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) hrtimer_update_base(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) hrtimer_force_reprogram(base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) raw_spin_unlock(&base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * Switch to high resolution mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) static void hrtimer_switch_to_hres(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (tick_init_highres()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) pr_warn("Could not switch to high resolution mode on CPU %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) base->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) base->hres_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) hrtimer_resolution = HIGH_RES_NSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) tick_setup_sched_timer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /* "Retrigger" the interrupt to get things going */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) retrigger_next_event(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static inline int hrtimer_is_hres_enabled(void) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static inline void hrtimer_switch_to_hres(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static inline void retrigger_next_event(void *arg) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) #endif /* CONFIG_HIGH_RES_TIMERS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * which the clock event device was armed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * Called with interrupts disabled and base->cpu_base.lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct hrtimer_clock_base *base = timer->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * CLOCK_REALTIME timer might be requested with an absolute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * expiry time which is less than base->offset. Set it to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (expires < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) expires = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (timer->is_soft) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * soft hrtimer could be started on a remote CPU. In this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * case softirq_expires_next needs to be updated on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * remote CPU. The soft hrtimer will not expire before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * first hard hrtimer on the remote CPU -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * hrtimer_check_target() prevents this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (timer_cpu_base->softirq_activated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) timer_cpu_base->softirq_next_timer = timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) timer_cpu_base->softirq_expires_next = expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (!ktime_before(expires, timer_cpu_base->expires_next) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) !reprogram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * If the timer is not on the current CPU, we cannot reprogram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * the other CPU's clock event device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (base->cpu_base != cpu_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * If the hrtimer interrupt is running, then it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * reevaluate the clock bases and reprogram the clock event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * device. The callbacks are always executed in hard interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * context so we don't need an extra check for a running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (cpu_base->in_hrtirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (expires >= cpu_base->expires_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* Update the pointer to the next expiring timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cpu_base->next_timer = timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cpu_base->expires_next = expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * If hres is not active, hardware does not have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * programmed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * If a hang was detected in the last timer interrupt then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * do not schedule a timer which is earlier than the expiry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * which we enforced in the hang detection. We want the system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * to make progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * Program the timer hardware. We enforce the expiry for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * events which are already in the past.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) tick_program_event(expires, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * Clock realtime was set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * Change the offset of the realtime clock vs. the monotonic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * We might have to reprogram the high resolution timer interrupt. On
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * SMP we call the architecture specific code to retrigger _all_ high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * resolution timer interrupts. On UP we just disable interrupts and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * call the high resolution interrupt code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) void clock_was_set(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) #ifdef CONFIG_HIGH_RES_TIMERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* Retrigger the CPU local events everywhere */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) on_each_cpu(retrigger_next_event, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) timerfd_clock_was_set();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
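/*
 * Deferred variant of clock_was_set(): scheduled via hrtimer_work below
 * so that callers which cannot invoke on_each_cpu() directly (e.g. with
 * interrupts disabled during resume) can still trigger the retrigger
 * and timerfd notification from process context.
 */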
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static void clock_was_set_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) clock_was_set();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static DECLARE_WORK(hrtimer_work, clock_was_set_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Called from timekeeping and resume code to reprogram the hrtimer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * interrupt device on all cpus and to notify timerfd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) void clock_was_set_delayed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) schedule_work(&hrtimer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * During resume we might have to reprogram the high resolution timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * interrupt on all online CPUs. However, all other CPUs will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * stopped with interrupts disabled so the clock_was_set() call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * must be deferred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) void hrtimers_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* Retrigger on the local CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) retrigger_next_event(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* And schedule a retrigger for all others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) clock_was_set_delayed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * Counterpart to lock_hrtimer_base above:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * hrtimer_forward - forward the timer expiry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * @timer: hrtimer to forward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * @now: forward past this time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * @interval: the interval to forward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * Forward the timer expiry so it will expire in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * Returns the number of overruns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Can be safely called from the callback function of @timer. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * called from other contexts @timer must neither be enqueued nor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * running the callback and the caller needs to take care of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * serialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * Note: This only updates the timer expiry value and does not requeue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) u64 orun = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ktime_t delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) delta = ktime_sub(now, hrtimer_get_expires(timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (delta < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (interval < hrtimer_resolution)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) interval = hrtimer_resolution;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (unlikely(delta >= interval)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) s64 incr = ktime_to_ns(interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) orun = ktime_divns(delta, incr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) hrtimer_add_expires_ns(timer, incr * orun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (hrtimer_get_expires_tv64(timer) > now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return orun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * This (and the ktime_add() below) is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * correction for exact:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) orun++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) hrtimer_add_expires(timer, interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return orun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) EXPORT_SYMBOL_GPL(hrtimer_forward);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * enqueue_hrtimer - internal function to (re)start a timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * The timer is inserted in expiry order. Insertion into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * red-black tree is O(log(n)). Must hold the base lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * Returns 1 when the new timer is the leftmost timer in the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int enqueue_hrtimer(struct hrtimer *timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct hrtimer_clock_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) debug_activate(timer, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) base->cpu_base->active_bases |= 1 << base->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* Pairs with the lockless read in hrtimer_is_queued() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return timerqueue_add(&base->active, &timer->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * __remove_hrtimer - internal function to remove a timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * Caller must hold the base lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * High resolution timer mode reprograms the clock event device when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * timer is the one which expires next. The caller can disable this by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * reprogram to zero. This is useful when the context does a reprogramming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * anyway (e.g. the timer interrupt).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void __remove_hrtimer(struct hrtimer *timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct hrtimer_clock_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) u8 newstate, int reprogram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct hrtimer_cpu_base *cpu_base = base->cpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) u8 state = timer->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* Pairs with the lockless read in hrtimer_is_queued() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) WRITE_ONCE(timer->state, newstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (!(state & HRTIMER_STATE_ENQUEUED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (!timerqueue_del(&base->active, &timer->node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) cpu_base->active_bases &= ~(1 << base->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * Note: If reprogram is false we do not update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * cpu_base->next_timer. This happens when we remove the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * timer on a remote cpu. No harm as we never dereference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * cpu_base->next_timer. So the worst that can happen is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * a superfluous call to hrtimer_force_reprogram() on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * remote cpu later on if the same timer gets enqueued again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (reprogram && timer == cpu_base->next_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) hrtimer_force_reprogram(cpu_base, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * remove hrtimer, called with base lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) bool restart, bool keep_local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) u8 state = timer->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (state & HRTIMER_STATE_ENQUEUED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) bool reprogram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * Remove the timer and force reprogramming when high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * resolution mode is active and the timer is on the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * CPU. If we remove a timer on another CPU, reprogramming is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * skipped. The interrupt event on this CPU is fired and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * reprogramming happens in the interrupt handler. This is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * rare case and less expensive than an SMP call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) debug_deactivate(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * If the timer is not restarted then reprogramming is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * required if the timer is local. If it is local and about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * to be restarted, avoid programming it twice (on removal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * and a moment later when it's requeued).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) state = HRTIMER_STATE_INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) reprogram &= !keep_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) __remove_hrtimer(timer, base, state, reprogram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) const enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) #ifdef CONFIG_TIME_LOW_RES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * CONFIG_TIME_LOW_RES indicates that the system has no way to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * granular time values. For relative timers we add hrtimer_resolution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * (i.e. one jiffy) to prevent short timeouts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) timer->is_rel = mode & HRTIMER_MODE_REL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (timer->is_rel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) tim = ktime_add_safe(tim, hrtimer_resolution);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
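/*
 * hrtimer_update_softirq_timer - re-evaluate the next soft expiry
 *
 * Look up the next expiring SOFT timer on this cpu_base and hand it to
 * hrtimer_reprogram(), which updates softirq_expires_next and, when
 * @reprogram is true and the expiry moves forward, the clock event device.
 */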
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ktime_t expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * Find the next SOFT expiration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * Reprogramming needs to be triggered even if the next soft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * hrtimer expires at the same time as the next hard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * hrtimer. cpu_base->softirq_expires_next needs to be updated!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (expires == KTIME_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * cpu_base->*expires_next is only set by hrtimer_reprogram()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
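/*
 * Common (re)start path used by hrtimer_start_range_ns().
 *
 * Remove the timer if it is queued, convert a relative expiry to an
 * absolute one, switch the clock base unless the timer has to stay
 * local, and enqueue it again. Returns nonzero when the timer became
 * the new first expiring timer and the caller has to reprogram the
 * clock event device; in the force_local case that reprogram is
 * already done here via hrtimer_force_reprogram().
 */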
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) u64 delta_ns, const enum hrtimer_mode mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct hrtimer_clock_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct hrtimer_clock_base *new_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) bool force_local, first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * If the timer is on the local cpu base and is the first expiring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * timer then this might end up reprogramming the hardware twice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * (on removal and on enqueue). To avoid that, prevent the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * reprogram on removal, keep the timer local to the current CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * and enforce reprogramming after it is queued no matter whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * it is the new first expiring timer again or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) force_local &= base->cpu_base->next_timer == timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * Remove an active timer from the queue. In case it is not queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * on the current CPU, make sure that remove_hrtimer() updates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * remote data correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * If it's on the current CPU and the first expiring timer, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * skip reprogramming, keep the timer local and enforce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * reprogramming later if it was the first expiring timer. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * avoids programming the underlying clock event twice (once at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * removal and once after enqueue).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) remove_hrtimer(timer, base, true, force_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (mode & HRTIMER_MODE_REL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) tim = ktime_add_safe(tim, base->get_time());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) tim = hrtimer_update_lowres(timer, tim, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) hrtimer_set_expires_range_ns(timer, tim, delta_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* Switch the timer base, if necessary: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!force_local) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) new_base = switch_hrtimer_base(timer, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) mode & HRTIMER_MODE_PINNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) new_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) first = enqueue_hrtimer(timer, new_base, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (!force_local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * Timer was forced to stay on the current CPU to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * reprogramming on removal and enqueue. Force reprogram the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * hardware by evaluating the new first expiring timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) hrtimer_force_reprogram(new_base->cpu_base, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * hrtimer_start_range_ns - (re)start an hrtimer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * @timer: the timer to be added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * @tim: expiry time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * @delta_ns: "slack" range for the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * softirq based mode is considered for debug purposes only!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) u64 delta_ns, const enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) struct hrtimer_clock_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * expiry mode because unmarked timers are moved to softirq expiry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (!IS_ENABLED(CONFIG_PREEMPT_RT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) base = lock_hrtimer_base(timer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) hrtimer_reprogram(timer, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) unlock_hrtimer_base(timer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * hrtimer_try_to_cancel - try to deactivate a timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * @timer: hrtimer to stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * * 0 when the timer was not active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * * 1 when the timer was active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * * -1 when the timer is currently executing the callback function and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * cannot be stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int hrtimer_try_to_cancel(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct hrtimer_clock_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * Check lockless first. If the timer is not active (neither
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * enqueued nor running the callback), nothing to do here. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * base lock does not serialize against a concurrent enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * so we can avoid taking it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (!hrtimer_active(timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) base = lock_hrtimer_base(timer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!hrtimer_callback_running(timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ret = remove_hrtimer(timer, base, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) unlock_hrtimer_base(timer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #ifdef CONFIG_PREEMPT_RT
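/*
 * PREEMPT_RT: softirq_expiry_lock serializes the cancellation of a soft
 * hrtimer against the softirq which is executing its callback. See
 * hrtimer_cancel_wait_running() and hrtimer_sync_wait_running() below.
 */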
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) spin_lock_init(&base->softirq_expiry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) spin_lock(&base->softirq_expiry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) spin_unlock(&base->softirq_expiry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * The counterpart to hrtimer_cancel_wait_running().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * the timer callback to finish. Drop expiry_lock and reacquire it. That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * allows the waiter to acquire the lock and make progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (atomic_read(&cpu_base->timer_waiters)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) spin_unlock(&cpu_base->softirq_expiry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) spin_lock(&cpu_base->softirq_expiry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) raw_spin_lock_irq(&cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * This function is called on PREEMPT_RT kernels when the fast path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * deletion of a timer failed because the timer callback function was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * This prevents priority inversion: if the softirq thread is preempted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * in the middle of a timer callback, then calling hrtimer_cancel() can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * lead to two issues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * - If the caller is on a remote CPU then it has to spin wait for the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * handler to complete. This can result in unbounded priority inversion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * - If the caller originates from the task which preempted the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * handler on the same CPU, then spin waiting for the timer handler to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * complete is never going to end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) void hrtimer_cancel_wait_running(const struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* Lockless read. Prevent the compiler from reloading it below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct hrtimer_clock_base *base = READ_ONCE(timer->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * Just relax if the timer expires in hard interrupt context or if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * it is currently on the migration base.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (!timer->is_soft || is_migration_base(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * Mark the base as contended and grab the expiry lock, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * held by the softirq across the timer callback. Drop the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * immediately so the softirq can expire the next timer. In theory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * the timer could already be running again, but that's highly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * unlikely and just causes another wait loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) atomic_inc(&base->cpu_base->timer_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) atomic_dec(&base->cpu_base->timer_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #else
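/* Stubs for !PREEMPT_RT: no expiry lock machinery is required. */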
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) unsigned long flags) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * hrtimer_cancel - cancel a timer and wait for the handler to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * @timer: the timer to be cancelled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * 0 when the timer was not active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * 1 when the timer was active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) int hrtimer_cancel(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ret = hrtimer_try_to_cancel(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) hrtimer_cancel_wait_running(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) } while (ret < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) EXPORT_SYMBOL_GPL(hrtimer_cancel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * hrtimer_get_remaining - get remaining time for the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * @timer: the timer to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ktime_t rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) lock_hrtimer_base(timer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) rem = hrtimer_expires_remaining_adjusted(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) rem = hrtimer_expires_remaining(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) unlock_hrtimer_base(timer, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) #ifdef CONFIG_NO_HZ_COMMON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * hrtimer_get_next_event - get the time until next expiry event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * Returns the next expiry time or KTIME_MAX if no timer is pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) u64 hrtimer_get_next_event(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) u64 expires = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) raw_spin_lock_irqsave(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (!__hrtimer_hres_active(cpu_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * hrtimer_next_event_without - time until next expiry event w/o one timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * @exclude: timer to exclude
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * Returns the next expiry time over all timers except for the @exclude one or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * KTIME_MAX if none of them is pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) u64 hrtimer_next_event_without(const struct hrtimer *exclude)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) u64 expires = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) raw_spin_lock_irqsave(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (__hrtimer_hres_active(cpu_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) unsigned int active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (!cpu_base->softirq_activated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) expires = __hrtimer_next_event_base(cpu_base, exclude,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) active, KTIME_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) expires = __hrtimer_next_event_base(cpu_base, exclude, active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
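/*
 * Map a clockid to the corresponding hrtimer clock base index. An
 * invalid id triggers a warning and falls back to HRTIMER_BASE_MONOTONIC.
 */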
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) static inline int hrtimer_clockid_to_base(clockid_t clock_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (likely(clock_id < MAX_CLOCKS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) int base = hrtimer_clock_to_base_table[clock_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (likely(base != HRTIMER_MAX_CLOCK_BASES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return HRTIMER_BASE_MONOTONIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
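/*
 * Common initialization helper for hrtimer_init(): select the clock
 * base according to the SOFT/HARD mode bits (and the PREEMPT_RT default
 * of soft expiry) and initialize the timerqueue node.
 */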
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct hrtimer_cpu_base *cpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) int base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * marked for hard interrupt expiry mode are moved into soft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * interrupt context for latency reasons and because the callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * can invoke functions which might sleep on RT, e.g. spin_lock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) softtimer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) memset(timer, 0, sizeof(struct hrtimer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) cpu_base = raw_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * clock modifications, so they need to become CLOCK_MONOTONIC to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * ensure POSIX compliance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) clock_id = CLOCK_MONOTONIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) base += hrtimer_clockid_to_base(clock_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) timer->is_soft = softtimer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) timer->base = &cpu_base->clock_base[base];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) timerqueue_init(&timer->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * hrtimer_init - initialize a timer to the given clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * @timer: the timer to be initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * @clock_id: the clock to be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * @mode: The modes which are relevant for initialization:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * HRTIMER_MODE_REL_SOFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * The PINNED variants of the above can be handed in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * but the PINNED bit is ignored as pinning happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * when the hrtimer is started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) debug_init(timer, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) __hrtimer_init(timer, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) EXPORT_SYMBOL_GPL(hrtimer_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * A timer is active when it is enqueued into the rbtree, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * callback function is running or it is in the state of being migrated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * to another CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * It is important for this function to not return a false negative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) bool hrtimer_active(const struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct hrtimer_clock_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) base = READ_ONCE(timer->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) seq = raw_read_seqcount_begin(&base->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (timer->state != HRTIMER_STATE_INACTIVE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) base->running == timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) } while (read_seqcount_retry(&base->seq, seq) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) base != READ_ONCE(timer->base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) EXPORT_SYMBOL_GPL(hrtimer_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * distinct sections:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * - queued: the timer is queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * - callback: the timer is being run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * - post: the timer is inactive or (re)queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * On the read side we ensure we observe timer->state and cpu_base->running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * from the same section, if anything changed while we looked at it, we retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * This includes timer->base changing because sequence numbers alone are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * insufficient for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * The sequence numbers are required because otherwise we could still observe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * a false negative if the read side got smeared over multiple consecutive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * __run_hrtimer() invocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct hrtimer_clock_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct hrtimer *timer, ktime_t *now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) unsigned long flags) __must_hold(&cpu_base->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) enum hrtimer_restart (*fn)(struct hrtimer *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) bool expires_in_hardirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) int restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) lockdep_assert_held(&cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) debug_deactivate(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) base->running = timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Separate the ->running assignment from the ->state assignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * As with a regular write barrier, this ensures the read side in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * hrtimer_active() cannot observe base->running == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * timer->state == INACTIVE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) raw_write_seqcount_barrier(&base->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) fn = timer->function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * timer is restarted with a period then it becomes an absolute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * timer. If it's not restarted it does not matter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (IS_ENABLED(CONFIG_TIME_LOW_RES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) timer->is_rel = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * The timer is marked as running in the CPU base, so it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * protected against migration to a different CPU even if the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) * is dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) trace_hrtimer_expire_entry(timer, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) expires_in_hardirq = lockdep_hrtimer_enter(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) restart = fn(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) lockdep_hrtimer_exit(expires_in_hardirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) trace_hrtimer_expire_exit(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) raw_spin_lock_irq(&cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * Note: We clear the running state after enqueue_hrtimer and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * we do not reprogram the event hardware. Reprogramming happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * either in hrtimer_start_range_ns() or in hrtimer_interrupt().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * Note: Because we dropped the cpu_base->lock above,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * hrtimer_start_range_ns() can have popped in and enqueued the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * for us already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (restart != HRTIMER_NORESTART &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) !(timer->state & HRTIMER_STATE_ENQUEUED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * Separate the ->running assignment from the ->state assignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * As with a regular write barrier, this ensures the read side in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * hrtimer_active() cannot observe base->running == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * timer->state == INACTIVE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) raw_write_seqcount_barrier(&base->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) WARN_ON_ONCE(base->running != timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) base->running = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) unsigned long flags, unsigned int active_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) struct hrtimer_clock_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) unsigned int active = cpu_base->active_bases & active_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) for_each_active_base(base, cpu_base, active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct timerqueue_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) ktime_t basenow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) basenow = ktime_add(now, base->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) while ((node = timerqueue_getnext(&base->active))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct hrtimer *timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) timer = container_of(node, struct hrtimer, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * The immediate goal for using the softexpires is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) * minimizing wakeups, not running timers at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * earliest interrupt after their soft expiration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * This allows us to avoid using a Priority Search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * Tree, which can answer a stabbing query for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * overlapping intervals and instead use the simple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * BST we already have.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * We don't add extra wakeups by delaying timers that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * are right-of a not yet expired timer, because that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * timer will have to trigger a wakeup anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) */
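/*
 * Illustrative example (assuming a timer armed via
 * hrtimer_set_expires_range_ns(timer, T, slack)): its soft expiry
 * is T and its hard expiry is T + slack. The clock event device is
 * programmed against the hard expiry, but once an interrupt fires
 * at basenow >= T the timer is expired here, possibly earlier than
 * its hard expiry but never before T.
 */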
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (basenow < hrtimer_get_softexpires_tv64(timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) __run_hrtimer(cpu_base, base, timer, &basenow, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (active_mask == HRTIMER_ACTIVE_SOFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) hrtimer_sync_wait_running(cpu_base, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ktime_t now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) hrtimer_cpu_base_lock_expiry(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) raw_spin_lock_irqsave(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) now = hrtimer_update_base(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) cpu_base->softirq_activated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) hrtimer_update_softirq_timer(cpu_base, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) hrtimer_cpu_base_unlock_expiry(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) #ifdef CONFIG_HIGH_RES_TIMERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * High resolution timer interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * Called with interrupts disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) void hrtimer_interrupt(struct clock_event_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) ktime_t expires_next, now, entry_time, delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) int retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) BUG_ON(!cpu_base->hres_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) cpu_base->nr_events++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) dev->next_event = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) raw_spin_lock_irqsave(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) entry_time = now = hrtimer_update_base(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) cpu_base->in_hrtirq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) * We set expires_next to KTIME_MAX here with cpu_base->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * held to prevent a timer from being enqueued in our queue via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * the migration code. This does not affect enqueueing of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * timers which run their callback and need to be requeued on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) cpu_base->expires_next = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (!ktime_before(now, cpu_base->softirq_expires_next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) cpu_base->softirq_expires_next = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) cpu_base->softirq_activated = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) raise_softirq_irqoff(HRTIMER_SOFTIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* Reevaluate the clock bases for the [soft] next expiry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) expires_next = hrtimer_update_next_event(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * Store the new expiry value so the migration code can verify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * against it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) cpu_base->expires_next = expires_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) cpu_base->in_hrtirq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /* Reprogramming necessary ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (!tick_program_event(expires_next, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) cpu_base->hang_detected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * The next timer was already expired due to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * - tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * - long lasting callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * - being scheduled away when running in a VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * We need to prevent the hrtimer interrupt routine from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * looping forever. We give it 3 attempts to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * overreacting to some spurious event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * Acquire base lock for updating the offsets and retrieving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * the current time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) raw_spin_lock_irqsave(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) now = hrtimer_update_base(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) cpu_base->nr_retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (++retries < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * Give the system a chance to do something other than looping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) * here. We stored the entry time, so we know exactly how long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * we spent here. We schedule the next event this amount of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) * time away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) cpu_base->nr_hangs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) cpu_base->hang_detected = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) delta = ktime_sub(now, entry_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if ((unsigned int)delta > cpu_base->max_hang_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) cpu_base->max_hang_time = (unsigned int) delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * Limit it to a sensible value as we enforce a longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * delay. Give the CPU at least 100ms to catch up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) */
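/*
 * Worked example (illustrative numbers): if the interrupt has been
 * running for delta = 250ms, the next event is pushed out by only
 * 100ms; if it ran for delta = 5ms, it is pushed out by 5ms.
 */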
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (delta > 100 * NSEC_PER_MSEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) expires_next = ktime_add(now, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) tick_program_event(expires_next, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /* called with interrupts disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) static inline void __hrtimer_peek_ahead_timers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct tick_device *td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!hrtimer_hres_active())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) td = this_cpu_ptr(&tick_cpu_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (td && td->evtdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) hrtimer_interrupt(td->evtdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) #else /* CONFIG_HIGH_RES_TIMERS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static inline void __hrtimer_peek_ahead_timers(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) #endif /* !CONFIG_HIGH_RES_TIMERS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * Called from run_local_timers in hardirq context every jiffy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) void hrtimer_run_queues(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) ktime_t now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (__hrtimer_hres_active(cpu_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * This _is_ ugly: We have to check periodically whether we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * can switch to highres and / or nohz mode. The clocksource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * switch happens with xtime_lock held. Notification from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * there only sets the check bit in the tick_oneshot code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * otherwise we might deadlock vs. xtime_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) hrtimer_switch_to_hres();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) raw_spin_lock_irqsave(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) now = hrtimer_update_base(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (!ktime_before(now, cpu_base->softirq_expires_next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) cpu_base->softirq_expires_next = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) cpu_base->softirq_activated = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) raise_softirq_irqoff(HRTIMER_SOFTIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * Sleep related functions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct hrtimer_sleeper *t =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) container_of(timer, struct hrtimer_sleeper, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct task_struct *task = t->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) t->task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) wake_up_process(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * @sl: sleeper to be started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * @mode: timer mode abs/rel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * Make the enqueue delivery mode check work on RT. If the sleeper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * was initialized for hard interrupt delivery, force the mode bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * This is a special case for hrtimer_sleepers because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * hrtimer_init_sleeper() determines the delivery mode on RT, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * avoids fiddling with this decision at the call sites.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) mode |= HRTIMER_MODE_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) hrtimer_start_expires(&sl->timer, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
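/*
 * Typical usage sketch (illustrative only; 'timeout' and 'slack' are
 * placeholder values and do_nanosleep() below is the canonical in-tree
 * user of this pattern):
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires_range_ns(&t.timer, timeout, slack);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
 *	if (t.task)
 *		schedule();
 *	hrtimer_cancel(&t.timer);
 *	destroy_hrtimer_on_stack(&t.timer);
 */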
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) clockid_t clock_id, enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * marked for hard interrupt expiry mode are moved into soft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * interrupt context either for latency reasons or because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * hrtimer callback takes regular spinlocks or invokes other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * functions which are not suitable for hard interrupt context on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * PREEMPT_RT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * The hrtimer_sleeper callback is RT compatible in hard interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * context, but there is a latency concern: Untrusted userspace can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * spawn many threads which arm timers for the same expiry time on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * the same CPU. That causes a latency spike due to the wakeup of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * a gazillion threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * OTOH, privileged real-time user space applications rely on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * low latency of hard interrupt wakeups. If the current task is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * a real-time scheduling class, mark the mode for hard interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * expiry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) mode |= HRTIMER_MODE_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) __hrtimer_init(&sl->timer, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) sl->timer.function = hrtimer_wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) sl->task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * hrtimer_init_sleeper - initialize sleeper to the given clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * @sl: sleeper to be initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * @clock_id: the clock to be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * @mode: timer mode abs/rel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) debug_init(&sl->timer, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) __hrtimer_init_sleeper(sl, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) switch (restart->nanosleep.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) #ifdef CONFIG_COMPAT_32BIT_TIME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) case TT_COMPAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) case TT_NATIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (put_timespec64(ts, restart->nanosleep.rmtp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) return -ERESTART_RESTARTBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) struct restart_block *restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) hrtimer_sleeper_start_expires(t, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (likely(t->task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) freezable_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) hrtimer_cancel(&t->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) mode = HRTIMER_MODE_ABS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) } while (t->task && !signal_pending(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (!t->task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) restart = &current->restart_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (restart->nanosleep.type != TT_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) ktime_t rem = hrtimer_expires_remaining(&t->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct timespec64 rmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (rem <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) rmt = ktime_to_timespec64(rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return nanosleep_copyout(restart, &rmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return -ERESTART_RESTARTBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct hrtimer_sleeper t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) destroy_hrtimer_on_stack(&t.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) const clockid_t clockid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct restart_block *restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct hrtimer_sleeper t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) u64 slack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) slack = current->timer_slack_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (dl_task(current) || rt_task(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) slack = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) hrtimer_init_sleeper_on_stack(&t, clockid, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) ret = do_nanosleep(&t, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (ret != -ERESTART_RESTARTBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /* Absolute timers do not update the rmtp value and restart: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (mode == HRTIMER_MODE_ABS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) ret = -ERESTARTNOHAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) restart = &current->restart_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) restart->nanosleep.clockid = t.timer.base->clockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) set_restart_fn(restart, hrtimer_nanosleep_restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) destroy_hrtimer_on_stack(&t.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) struct __kernel_timespec __user *, rmtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) struct timespec64 tu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (get_timespec64(&tu, rqtp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (!timespec64_valid(&tu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) current->restart_block.nanosleep.rmtp = rmtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) CLOCK_MONOTONIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) #ifdef CONFIG_COMPAT_32BIT_TIME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) struct old_timespec32 __user *, rmtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct timespec64 tu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (get_old_timespec32(&tu, rqtp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (!timespec64_valid(&tu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) current->restart_block.nanosleep.compat_rmtp = rmtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) CLOCK_MONOTONIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * Functions related to boot-time initialization:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) int hrtimers_prepare_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) clock_b->cpu_base = cpu_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) timerqueue_init_head(&clock_b->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) cpu_base->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) cpu_base->active_bases = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) cpu_base->hres_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) cpu_base->hang_detected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) cpu_base->next_timer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) cpu_base->softirq_next_timer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) cpu_base->expires_next = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) cpu_base->softirq_expires_next = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) hrtimer_cpu_base_init_expiry_lock(cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) struct hrtimer_clock_base *new_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) struct hrtimer *timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) struct timerqueue_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) while ((node = timerqueue_getnext(&old_base->active))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) timer = container_of(node, struct hrtimer, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) BUG_ON(hrtimer_callback_running(timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) debug_deactivate(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * Mark it as ENQUEUED not INACTIVE otherwise the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * timer could be seen as !active and just vanish away
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * under us on another CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) timer->base = new_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * Enqueue the timers on the new CPU. This does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * reprogram the event device in case the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * expires before the earliest on this CPU, but we run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * hrtimer_interrupt() after we migrated everything to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * sort out already expired timers and reprogram the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * event device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) int hrtimers_dead_cpu(unsigned int scpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) struct hrtimer_cpu_base *old_base, *new_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) BUG_ON(cpu_online(scpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) tick_cancel_sched_timer(scpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * This BH disable ensures that raise_softirq_irqoff() does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * not wake up ksoftirqd (and acquire the pi-lock) while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * holding the cpu_base lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) old_base = &per_cpu(hrtimer_bases, scpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) new_base = this_cpu_ptr(&hrtimer_bases);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * The caller is globally serialized and nobody else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * takes two locks at once, so deadlock is not possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) raw_spin_lock(&new_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) migrate_hrtimer_list(&old_base->clock_base[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) &new_base->clock_base[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * The migration might have changed the first expiring softirq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * timer on this CPU. Update it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) hrtimer_update_softirq_timer(new_base, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) raw_spin_unlock(&old_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) raw_spin_unlock(&new_base->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /* Check if we got expired work to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) __hrtimer_peek_ahead_timers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) #endif /* CONFIG_HOTPLUG_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) void __init hrtimers_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) hrtimers_prepare_cpu(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * schedule_hrtimeout_range_clock - sleep until timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * @expires: timeout value (ktime_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * @delta: slack in expires timeout (ktime_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * @mode: timer mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * @clock_id: timer clock to be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) const enum hrtimer_mode mode, clockid_t clock_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) struct hrtimer_sleeper t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) * Optimize when a zero timeout value is given. It does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * matter whether this is an absolute or a relative time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (expires && *expires == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * A NULL parameter means "infinite"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (!expires) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) hrtimer_sleeper_start_expires(&t, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (likely(t.task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) hrtimer_cancel(&t.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) destroy_hrtimer_on_stack(&t.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return !t.task ? 0 : -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * schedule_hrtimeout_range - sleep until timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) * @expires: timeout value (ktime_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * @delta: slack in expires timeout (ktime_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * @mode: timer mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * Make the current task sleep until the given expiry time has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * elapsed. The routine will return immediately unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) * the current task state has been set (see set_current_state()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * The @delta argument gives the kernel the freedom to schedule the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * actual wakeup to a time that is both power and performance friendly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * The kernel gives the normal best effort behavior for "@expires+@delta",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * and may decide to fire the timer earlier, but no earlier than @expires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * You can set the task state as follows -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * pass before the routine returns unless the current task is explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * woken up, (e.g. by wake_up_process()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * delivered to the current task or the current task is explicitly woken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * The current task state is guaranteed to be TASK_RUNNING when this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * routine returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * Returns 0 when the timer has expired. If the task was woken before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * by an explicit wakeup, it returns -EINTR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) const enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return schedule_hrtimeout_range_clock(expires, delta, mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) CLOCK_MONOTONIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
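/*
 * Usage sketch (illustrative; the 10ms expiry and 1ms slack are arbitrary
 * example values):
 *
 *	ktime_t to = ktime_set(0, 10 * NSEC_PER_MSEC);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	ret = schedule_hrtimeout_range(&to, NSEC_PER_MSEC, HRTIMER_MODE_REL);
 *
 * ret is 0 when the (slack adjusted) timeout elapsed and -EINTR when the
 * task was woken early by a signal or an explicit wakeup.
 */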
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * schedule_hrtimeout - sleep until timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * @expires: timeout value (ktime_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * @mode: timer mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * Make the current task sleep until the given expiry time has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) * elapsed. The routine will return immediately unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * the current task state has been set (see set_current_state()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * You can set the task state as follows -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) * pass before the routine returns unless the current task is explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) * woken up, (e.g. by wake_up_process()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * delivered to the current task or the current task is explicitly woken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) * The current task state is guaranteed to be TASK_RUNNING when this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * routine returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * Returns 0 when the timer has expired. If the task was woken before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) * by an explicit wakeup, it returns -EINTR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) int __sched schedule_hrtimeout(ktime_t *expires,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) const enum hrtimer_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) return schedule_hrtimeout_range(expires, 0, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) EXPORT_SYMBOL_GPL(schedule_hrtimeout);