// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
	posix_cputimers_init(pct);
	if (cpu_limit != RLIM_INFINITY) {
		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
		pct->timers_active = true;
	}
}

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

/*
 * Functions for validating access to tasks.
 */
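/*
 * Reminder (see the CPUCLOCK_* macros in <linux/posix-timers.h>): a CPU
 * clockid_t encodes the target PID in its upper bits and the clock type
 * in its low bits, roughly:
 *
 *	CPUCLOCK_PID(clock)       -> (pid_t) ~((clock) >> 3)
 *	CPUCLOCK_PERTHREAD(clock) -> bit 2 set means a per-thread clock
 *	CPUCLOCK_WHICH(clock)     -> low two bits: PROF, VIRT or SCHED
 */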
static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t upid = CPUCLOCK_PID(clock);
	struct pid *pid;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	/*
	 * If the encoded PID is 0, then the timer is targeted at current
	 * or the process to which current belongs.
	 */
	if (upid == 0)
		return thread ? task_pid(current) : task_tgid(current);

	pid = find_vpid(upid);
	if (!pid)
		return NULL;

	if (thread) {
		struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
		return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
	}
	/*
	 * For clock_gettime(PROCESS) allow finding the process with
	 * the pid of the current task. The code needs the tgid
	 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
	 * used to find the process.
	 */
	if (gettime && (pid == task_pid(current)))
		return task_tgid(current);

	/*
	 * For process clocks, require that the pid identifies a process.
	 */
	return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	int ret;

	rcu_read_lock();
	ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
	rcu_read_unlock();

	return ret;
}

static inline enum pid_type clock_pid_type(const clockid_t clock)
{
	return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
}

static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{
	return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	u64 delta, incr, expires = timer->it.cpu.node.expires;
	int i;

	if (!timer->it_interval)
		return expires;

	if (now < expires)
		return expires;

	incr = timer->it_interval;
	delta = now + incr - expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.node.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
	return timer->it.cpu.node.expires;
}
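/*
 * Worked example for bump_cpu_timer() above (hypothetical numbers):
 * with expires = 10, it_interval = 3 and now = 20, delta = 13. The
 * first loop doubles incr to 12 (i = 2); the second loop subtracts 12
 * from delta and adds 1 << 2 = 4 overruns, then skips incr = 6 and
 * incr = 3. Result: expires = 22 (> now) and it_overrun += 4, i.e. the
 * expiry advanced past "now" in O(log(delta/incr)) steps instead of
 * one step per missed period.
 */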

/*
 * Check whether all cache entries contain U64_MAX, i.e. eternal expiry time.
 * ~x is only zero when x == U64_MAX, so the OR of the complements is zero
 * exactly when every nextevt is U64_MAX.
 */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't export its true resolution, but it is
			 * certainly much finer than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
	samples[CPUCLOCK_PROF] = stime + utime;
	samples[CPUCLOCK_VIRT] = utime;
	samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
	u64 stime, utime;

	task_cputime(p, &utime, &stime);
	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{
	u64 stime, utime, rtime;

	utime = atomic64_read(&at->utime);
	stime = atomic64_read(&at->stime);
	rtime = atomic64_read(&at->sum_exec_runtime);
	store_samples(samples, stime, utime, rtime);
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}
/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which the cputime is sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	WARN_ON_ONCE(!pct->timers_active);

	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(pct->timers_active)) {
		struct task_cputime sum;

		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting timers_active without a lock. Ensure this
		 * only gets written to in one operation. We set it after
		 * update_gt_cputime() as a small optimization, but
		 * barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(pct->timers_active, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
	struct task_cputime ct;

	thread_group_cputime(tsk, &ct);
	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. clkid is already validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	struct posix_cputimers *pct = &p->signal->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];

	if (!READ_ONCE(pct->timers_active)) {
		if (start)
			thread_group_start_cputime(p, samples);
		else
			__thread_group_cputime(p, samples);
	} else {
		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
	}

	return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	rcu_read_lock();
	tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
	if (!tsk) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	rcu_read_unlock();

	*tp = ns_to_timespec64(t);
	return 0;
}
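/*
 * Userspace view (illustrative, not part of this file): the function
 * above backs e.g.
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *
 * and likewise CLOCK_THREAD_CPUTIME_ID for the per-thread variant.
 */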

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	static struct lock_class_key posix_cpu_timers_key;
	struct pid *pid;

	rcu_read_lock();
	pid = pid_for_clock(new_timer->it_clock, false);
	if (!pid) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/*
	 * If posix timer expiry is handled in task work context then
	 * timer::it_lock can be taken without disabling interrupts as all
	 * other locking happens in task context. This requires a separate
	 * lock class key otherwise regular posix timer expiry would record
	 * the lock class being taken in interrupt context and generate a
	 * false positive warning.
	 */
	if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
		lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);

	new_timer->kclock = &clock_posix_cpu;
	timerqueue_init(&new_timer->it.cpu.node);
	new_timer->it.cpu.pid = get_pid(pid);
	rcu_read_unlock();
	return 0;
}
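/*
 * Userspace sketch (illustrative; standard POSIX timer API, not part of
 * this file): creating a timer on the thread CPU clock handled above.
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGPROF,
 *	};
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 */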

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct sighand_struct *sighand;
	struct task_struct *p;
	unsigned long flags;
	int ret = 0;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * This raced with the reaping of the task. The exit cleanup
		 * should have removed this timer from the timer queue.
		 */
		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			cpu_timer_dequeue(ctmr);

		unlock_task_sighand(p, &flags);
	}

out:
	rcu_read_unlock();
	if (!ret)
		put_pid(ctmr->pid);

	return ret;
}

static void cleanup_timerqueue(struct timerqueue_head *head)
{
	struct timerqueue_node *node;
	struct cpu_timer *ctmr;

	while ((node = timerqueue_getnext(head))) {
		timerqueue_del(head, node);
		ctmr = container_of(node, struct cpu_timer, node);
		ctmr->head = NULL;
	}
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{
	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(&tsk->posix_cputimers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(&tsk->signal->posix_cputimers);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{
	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 newexp = cpu_timer_getexpires(ctmr);
	struct posix_cputimer_base *base;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		base = p->posix_cputimers.bases + clkidx;
	else
		base = p->signal->posix_cputimers.bases + clkidx;

	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
		return;

	/*
	 * We are the new earliest-expiring POSIX 1.b timer, hence
	 * need to update expiration cache. Take into account that
	 * for process timers we share expiration cache with itimers
	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
	 */
	if (newexp < base->nextevt)
		base->nextevt = newexp;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
	else
		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		cpu_timer_setexpires(ctmr, 0);
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		cpu_timer_setexpires(ctmr, 0);
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer. Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		cpu_timer_setexpires(ctmr, 0);
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer. But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	u64 old_expires, new_expires, old_incr, val;
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct sighand_struct *sighand;
	struct task_struct *p;
	unsigned long flags;
	int ret = 0;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p) {
		/*
		 * If p has just been reaped, we can no
		 * longer get any information about it at all.
		 */
		rcu_read_unlock();
		return -ESRCH;
	}

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	old_incr = timer->it_interval;
	old_expires = cpu_timer_getexpires(ctmr);

	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else {
		cpu_timer_dequeue(ctmr);
	}

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative. To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer). With an absolute time, we must
	 * check if it's already passed. In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		val = cpu_clock_sample(clkid, p);
	else
		val = cpu_clock_sample_group(clkid, p, true);

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has overrun already.
			 * If it has, we'll report it as having overrun and
			 * with the next reloaded timer already ticking,
			 * though we are swallowing that pending
			 * notification here to install the new setting.
			 */
			u64 exp = bump_cpu_timer(timer, val);

			if (val < exp) {
				old_expires = exp - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	cpu_timer_setexpires(ctmr, new_expires);
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer, p);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	rcu_read_unlock();
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
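/*
 * Userspace sketch (illustrative; standard POSIX timer API): arming a
 * CPU timer created as in the earlier example to fire after one second
 * of consumed CPU time, then every 250ms of CPU time.
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 250000000 },
 *	};
 *	timer_settime(tid, 0, &its, NULL);
 */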

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 now, expires = cpu_timer_getexpires(ctmr);
	struct task_struct *p;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!expires)
		goto out;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		now = cpu_clock_sample(clkid, p);
	else
		now = cpu_clock_sample_group(clkid, p, false);

	if (now < expires) {
		itp->it_value = ns_to_timespec64(expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet. Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
out:
	rcu_read_unlock();
}

#define MAX_COLLECTED	20

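/*
 * Move expired timers off @head onto the @firing list and return the
 * new expiry cache value: the expiry time of the first timer that has
 * not expired (or was left queued because the MAX_COLLECTED batch
 * limit was hit), or U64_MAX if the queue was drained completely.
 */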
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static u64 collect_timerqueue(struct timerqueue_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct list_head *firing, u64 now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct timerqueue_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) while ((next = timerqueue_getnext(head))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct cpu_timer *ctmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) u64 expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) ctmr = container_of(next, struct cpu_timer, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) expires = cpu_timer_getexpires(ctmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /* Limit the number of timers to expire at once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (++i == MAX_COLLECTED || now < expires)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) ctmr->firing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) cpu_timer_dequeue(ctmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) list_add_tail(&ctmr->elist, firing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct list_head *firing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct posix_cputimer_base *base = pct->bases;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) base->nextevt = collect_timerqueue(&base->tqhead, firing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) samples[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static inline void check_dl_overrun(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (tsk->dl.dl_overrun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) tsk->dl.dl_overrun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
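/*
 * Editor's sketch (userspace, hedged): the SIGXCPU above only reaches a
 * SCHED_DEADLINE task that opted in with SCHED_FLAG_DL_OVERRUN. glibc has
 * historically not wrapped sched_setattr(), so the struct layout and the
 * constants are spelled out here; they mirror the UAPI headers at the
 * time of writing. Needs root / CAP_SYS_NICE.
 */
#if 0
#include <linux/types.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SCHED_DEADLINE		6
#define SCHED_FLAG_DL_OVERRUN	0x04

struct sched_attr {
	__u32 size;
	__u32 sched_policy;
	__u64 sched_flags;
	__s32 sched_nice;
	__u32 sched_priority;
	__u64 sched_runtime;
	__u64 sched_deadline;
	__u64 sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_DL_OVERRUN,
		.sched_runtime	=  2 * 1000 * 1000,	/*  2 ms */
		.sched_deadline	= 10 * 1000 * 1000,	/* 10 ms */
		.sched_period	= 10 * 1000 * 1000,
	};

	signal(SIGXCPU, SIG_IGN);	/* a real handler would log/adapt */
	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		return 1;
	for (;;)
		;	/* overrunning the 2 ms runtime raises SIGXCPU */
}
#endif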
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (time < limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (print_fatal_signals) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rt ? "RT" : "CPU", hard ? "hard" : "soft",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) current->comm, task_pid_nr(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) __group_send_sig_info(signo, SEND_SIG_PRIV, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * Check for any per-thread CPU timers that have fired and move them off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * the tsk->cpu_timers[N] list onto the firing list. Here we update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * tsk->it_*_expires values to reflect the remaining thread CPU timers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static void check_thread_timers(struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct list_head *firing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct posix_cputimers *pct = &tsk->posix_cputimers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u64 samples[CPUCLOCK_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) unsigned long soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (dl_task(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) check_dl_overrun(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (expiry_cache_is_inactive(pct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) task_sample_cputime(tsk, samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) collect_posix_cputimers(pct, samples, firing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * Check for the special case thread timers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) soft = task_rlimit(tsk, RLIMIT_RTTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (soft != RLIM_INFINITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* Task RT timeout is accounted in jiffies. RTTIME is usec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* At the hard limit, send SIGKILL. No further action. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (hard != RLIM_INFINITY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) check_rlimit(rttime, hard, SIGKILL, true, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* At the soft limit, send a SIGXCPU every second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) soft += USEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (expiry_cache_is_inactive(pct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
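/*
 * Editor's sketch (userspace, hedged): the RLIMIT_RTTIME handling above
 * as seen from a realtime task. The limit is in microseconds of CPU time
 * consumed without sleeping; the soft limit raises SIGXCPU (and is bumped
 * by one second above, so the signal repeats), the hard limit is SIGKILL.
 * Needs privileges to switch to SCHED_FIFO.
 */
#if 0
#include <sched.h>
#include <signal.h>
#include <sys/resource.h>
#include <unistd.h>

static void on_xcpu(int sig)
{
	(void)sig;
	write(2, "RT soft limit hit\n", 18);	/* async-signal-safe */
}

int main(void)
{
	/* Soft 0.5 s, hard 2 s of non-sleeping RT CPU time (usec). */
	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 2000000 };
	struct sched_param sp = { .sched_priority = 1 };

	signal(SIGXCPU, on_xcpu);
	setrlimit(RLIMIT_RTTIME, &rl);
	if (sched_setscheduler(0, SCHED_FIFO, &sp))
		return 1;
	for (;;)
		;	/* busy loop: SIGXCPU at ~0.5 s, SIGKILL at ~2 s */
}
#endif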
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static inline void stop_process_timers(struct signal_struct *sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct posix_cputimers *pct = &sig->posix_cputimers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* Turn off the active flag. This is done without locking. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) WRITE_ONCE(pct->timers_active, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) u64 *expires, u64 cur_time, int signo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!it->expires)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (cur_time >= it->expires) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (it->incr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) it->expires += it->incr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) it->expires = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) trace_itimer_expire(signo == SIGPROF ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ITIMER_PROF : ITIMER_VIRTUAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) task_tgid(tsk), cur_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (it->expires && it->expires < *expires)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) *expires = it->expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
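/*
 * Editor's sketch (userspace): the itimer path above from the other side.
 * ITIMER_VIRTUAL accounts user-mode CPU time and delivers SIGVTALRM; a
 * non-zero it_interval corresponds to it->incr above (the timer re-arms),
 * a zero interval to the one-shot case (it->expires is cleared).
 */
#if 0
#include <signal.h>
#include <sys/time.h>

static volatile sig_atomic_t ticks;

static void on_vtalrm(int sig)
{
	(void)sig;
	ticks++;
}

int main(void)
{
	struct itimerval itv = {
		.it_value    = { .tv_usec = 100 * 1000 },	/* first expiry */
		.it_interval = { .tv_usec = 100 * 1000 },	/* re-arm period */
	};

	signal(SIGVTALRM, on_vtalrm);
	setitimer(ITIMER_VIRTUAL, &itv, NULL);
	while (ticks < 10)
		;	/* burn user-mode CPU time; handler counts fires */
	return 0;
}
#endif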
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * Check for any process-wide CPU timers that have fired and move them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * off the tsk->signal->posix_cputimers queues onto the firing list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * Per-thread timers have already been taken off by check_thread_timers().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static void check_process_timers(struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct list_head *firing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct signal_struct *const sig = tsk->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct posix_cputimers *pct = &sig->posix_cputimers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u64 samples[CPUCLOCK_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) unsigned long soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * If there are no active process-wide timers (POSIX 1.b, itimers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * RLIMIT_CPU), there is nothing to check. Also skip the process-wide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * timer processing when another task is already handling them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * Signify that a thread is checking for process timers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * Write access to this field is protected by the sighand lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) pct->expiry_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Collect the current process totals. Group accounting is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * so the sample can be taken directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) collect_posix_cputimers(pct, samples, firing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * Check for the special case process timers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) &pct->bases[CPUCLOCK_PROF].nextevt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) samples[CPUCLOCK_PROF], SIGPROF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) &pct->bases[CPUCLOCK_VIRT].nextevt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) samples[CPUCLOCK_VIRT], SIGVTALRM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) soft = task_rlimit(tsk, RLIMIT_CPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (soft != RLIM_INFINITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) u64 ptime = samples[CPUCLOCK_PROF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) u64 softns = (u64)soft * NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) u64 hardns = (u64)hard * NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* At the hard limit, send SIGKILL. No further action. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (hard != RLIM_INFINITY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) check_rlimit(ptime, hardns, SIGKILL, false, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* At the soft limit, send a SIGXCPU every second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) softns += NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Update the expiry cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) pct->bases[CPUCLOCK_PROF].nextevt = softns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (expiry_cache_is_inactive(pct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) stop_process_timers(sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) pct->expiry_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
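/*
 * Editor's sketch (userspace): the RLIMIT_CPU logic above, observed from
 * a process. At the soft limit a SIGXCPU arrives and the kernel raises
 * the soft limit by one second (soft + 1 above), so SIGXCPU repeats for
 * every further second of CPU time until the hard limit delivers SIGKILL.
 */
#if 0
#include <signal.h>
#include <sys/resource.h>
#include <unistd.h>

static void on_xcpu(int sig)
{
	(void)sig;
	write(2, "soft RLIMIT_CPU hit\n", 20);	/* async-signal-safe */
}

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 3 };	/* seconds */

	signal(SIGXCPU, on_xcpu);
	setrlimit(RLIMIT_CPU, &rl);
	for (;;)
		;	/* SIGXCPU after ~1 s and ~2 s, SIGKILL at ~3 s */
}
#endif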
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * This is called from the signal code (via posixtimer_rearm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * when the last timer signal was delivered and we have to reload the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static void posix_cpu_timer_rearm(struct k_itimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct sighand_struct *sighand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) u64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) p = cpu_timer_task_rcu(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* Protect timer list r/w in arm_timer() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) sighand = lock_task_sighand(p, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (unlikely(sighand == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * Fetch the current sample and update the timer's expiry time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (CPUCLOCK_PERTHREAD(timer->it_clock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) now = cpu_clock_sample(clkid, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) now = cpu_clock_sample_group(clkid, p, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) bump_cpu_timer(timer, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * Now re-arm for the new expiry time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) arm_timer(timer, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) unlock_task_sighand(p, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * task_cputimers_expired - Check whether posix CPU timers are expired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * @samples: Array of current samples for the CPUCLOCK clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * @pct: Pointer to a posix_cputimers container
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * Returns true if any member of @samples is greater than or equal to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * corresponding member of @pct->bases[CLK].nextevt. False otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) for (i = 0; i < CPUCLOCK_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (samples[i] >= pct->bases[i].nextevt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * fastpath_timer_check - POSIX CPU timers fast path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * @tsk: The task (thread) being checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * Check the task and thread group timers. If both are zero (there are no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * timers set) return false. Otherwise snapshot the task and thread group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * timers and compare them with the corresponding expiration times. Return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * true if a timer has expired, else return false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static inline bool fastpath_timer_check(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct posix_cputimers *pct = &tsk->posix_cputimers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct signal_struct *sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!expiry_cache_is_inactive(pct)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) u64 samples[CPUCLOCK_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) task_sample_cputime(tsk, samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (task_cputimers_expired(samples, pct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) sig = tsk->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) pct = &sig->posix_cputimers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * Check if thread group timers expired when timers are active and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * no other thread in the group is already handling expiry for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * thread group cputimers. These fields are read without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * sighand lock. However, this is fine because this is meant to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * a fastpath heuristic to determine whether we should try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * acquire the sighand lock to handle timer expiry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * In the worst case, if timers_active is set or expiry_active is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * cleared concurrently but the current thread does not see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * change yet, the timer checks are delayed until the next thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * in the group gets a scheduler interrupt to handle the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * This isn't an issue in practice because such delays in signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * delivery are expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) u64 samples[CPUCLOCK_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (task_cputimers_expired(samples, pct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (dl_task(tsk) && tsk->dl.dl_overrun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static void handle_posix_cpu_timers(struct task_struct *tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static void posix_cpu_timers_work(struct callback_head *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) handle_posix_cpu_timers(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * Clear existing posix CPU timers task work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) void clear_posix_cputimers_work(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * A copied work entry from the old task is not meaningful; clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * N.B. init_task_work will not do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) memset(&p->posix_cputimers_work.work, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) sizeof(p->posix_cputimers_work.work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) init_task_work(&p->posix_cputimers_work.work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) posix_cpu_timers_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) p->posix_cputimers_work.scheduled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * Initialize posix CPU timers task work in init task. Out of line to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * keep the callback static and to avoid header recursion hell.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) void __init posix_cputimers_init_work(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) clear_posix_cputimers_work(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * in hard interrupt context or in task context with interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * disabled. Aside from that, the writer/reader interaction is always in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * the context of the current task, which means they are strictly per CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return tsk->posix_cputimers_work.scheduled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static inline void __run_posix_cpu_timers(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* Schedule task work to actually expire the timers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) tsk->posix_cputimers_work.scheduled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) unsigned long start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * On !RT kernels interrupts are disabled while collecting expired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * timers, so no tick can happen and the fast path check can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * reenabled without further checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tsk->posix_cputimers_work.scheduled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * On RT enabled kernels ticks can happen while the expired timers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * are collected under sighand lock. But any tick which observes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * the CPUTIMERS_WORK_SCHEDULED bit set does not run the fastpath
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * checks. So reenabling the tick work has to be done carefully:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * Disable interrupts and run the fast path check if jiffies have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * advanced since the collecting of expired timers started. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * jiffies have not advanced or the fast path check did not find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * newly expired timers, reenable the fast path check in the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * interrupt. If there are newly expired timers, return false and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * let the collection loop repeat.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (start != jiffies && fastpath_timer_check(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) tsk->posix_cputimers_work.scheduled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) #else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static inline void __run_posix_cpu_timers(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) lockdep_posixtimer_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) handle_posix_cpu_timers(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) lockdep_posixtimer_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) unsigned long start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) #endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void handle_posix_cpu_timers(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct k_itimer *timer, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) unsigned long flags, start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) LIST_HEAD(firing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!lock_task_sighand(tsk, &flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * On RT locking sighand lock does not disable interrupts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * so this needs to be careful vs. ticks. Store the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * jiffies value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) start = READ_ONCE(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Here we take off tsk->signal->cpu_timers[N] and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * tsk->cpu_timers[N] all the timers that are firing, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * put them on the firing list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) check_thread_timers(tsk, &firing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) check_process_timers(tsk, &firing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * The above timer checks have updated the expiry cache and,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * because nothing can have queued or modified timers after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * sighand lock was taken above, it is guaranteed to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * consistent. So the next timer interrupt fastpath check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * will find valid data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * If timer expiry runs in the timer interrupt context then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * the loop is not relevant as timers will be directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * expired in interrupt context. The stub function below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * always returns true, which allows the compiler to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * optimize the loop out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * If timer expiry is deferred to task work context then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * the following rules apply:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * - On !RT kernels no tick can have happened on this CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * after sighand lock was acquired because interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * disabled. So reenabling task work before dropping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * sighand lock and reenabling interrupts is race free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * - On RT kernels ticks might have happened but the tick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * work ignored posix CPU timer handling because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * must be done very carefully including a check whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * ticks have happened since the start of the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * expiry checks. posix_cpu_timers_enable_work() takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * care of that and eventually lets the expiry checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * run again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) } while (!posix_cpu_timers_enable_work(tsk, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * We must release sighand lock before taking any timer's lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * There is a potential race with timer deletion here, as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * siglock now protects our private firing list. We have set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * the firing flag in each timer, so that a deletion attempt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * that gets the timer lock before we do will give it up and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * spin until we've taken care of that timer below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) unlock_task_sighand(tsk, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * Now that all the timers on our list have the firing flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * no one will touch their list entries but us. We'll take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * each timer's lock before clearing its firing flag, so no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * timer call will interfere.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int cpu_firing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * spin_lock() is sufficient here even independent of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * expiry context. If expiry happens in hard interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * context it's obvious. For task work context it's safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * because all other operations on timer::it_lock happen in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * task context (syscall or exit).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) spin_lock(&timer->it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) list_del_init(&timer->it.cpu.elist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) cpu_firing = timer->it.cpu.firing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) timer->it.cpu.firing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * The firing flag is -1 if we collided with a reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * of the timer, which already reported this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * almost-firing as an overrun. So don't generate an event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (likely(cpu_firing >= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) cpu_timer_fire(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) spin_unlock(&timer->it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * This is called from the timer interrupt handler. The irq handler has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * already updated our counts. We need to check if any timers fire now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * Interrupts are disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) void run_posix_cpu_timers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct task_struct *tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * If the actual expiry is deferred to task work context and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * work is already scheduled, there is no point in doing anything here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (posix_cpu_timers_work_scheduled(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * The fast path checks that there are no expired thread or thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * group timers. If that's so, just return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (!fastpath_timer_check(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) __run_posix_cpu_timers(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * The tsk->sighand->siglock must be held by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) u64 *newval, u64 *oldval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) u64 now, *nextevt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) now = cpu_clock_sample_group(clkid, tsk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (oldval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * We are setting an itimer. The *oldval argument is absolute and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * we update it to be relative; the *newval argument is relative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * and we update it to be absolute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (*oldval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (*oldval <= now) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /* Just about to fire. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) *oldval = TICK_NSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) *oldval -= now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (!*newval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) *newval += now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * Update the expiration cache if this is the earliest timer. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * CPUCLOCK_PROF expiry cache is also used by RLIMIT_CPU!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (*newval < *nextevt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) *nextevt = *newval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
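/*
 * Editor's sketch (userspace): the absolute/relative conversion described
 * above is what makes setitimer() report remaining time. The kernel keeps
 * the ITIMER_PROF expiry absolute on the group CPU clock (*newval += now)
 * and converts the old value back to time remaining (*oldval -= now)
 * before handing it out.
 */
#if 0
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct itimerval nval = { .it_value.tv_sec = 10 };
	struct itimerval oval;

	setitimer(ITIMER_PROF, &nval, NULL);

	for (volatile unsigned long i = 0; i < 100 * 1000 * 1000; i++)
		;				/* consume some CPU time */

	setitimer(ITIMER_PROF, &nval, &oval);	/* re-arm, fetch old state */
	/* oval.it_value is relative: roughly 10 s minus the CPU time used. */
	printf("remaining: %ld.%06lds\n",
	       (long)oval.it_value.tv_sec, (long)oval.it_value.tv_usec);
	return 0;
}
#endif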
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) const struct timespec64 *rqtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct itimerspec64 it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) struct k_itimer timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) u64 expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * Set up a temporary timer and then wait for it to go off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) memset(&timer, 0, sizeof(timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) spin_lock_init(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) timer.it_clock = which_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) timer.it_overrun = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) error = posix_cpu_timer_create(&timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) timer.it_process = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static struct itimerspec64 zero_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct restart_block *restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) memset(&it, 0, sizeof(it));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) it.it_value = *rqtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) spin_lock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) error = posix_cpu_timer_set(&timer, flags, &it, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) spin_unlock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) while (!signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (!cpu_timer_getexpires(&timer.it.cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * Our timer fired and was reset; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * deletion below cannot fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) posix_cpu_timer_del(&timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) spin_unlock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * Block until cpu_timer_fire (or a signal) wakes us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) __set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) spin_unlock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) spin_lock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * We were interrupted by a signal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) expires = cpu_timer_getexpires(&timer.it.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * The timer is now unarmed; deletion cannot fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) posix_cpu_timer_del(&timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) spin_unlock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) while (error == TIMER_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * We need to handle the case when the timer was or is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * in the middle of firing. In other cases we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * already freed the resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) spin_lock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) error = posix_cpu_timer_del(&timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) spin_unlock_irq(&timer.it_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * It actually did fire already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) error = -ERESTART_RESTARTBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * Report back to the user the time still remaining.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) restart = &current->restart_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) restart->nanosleep.expires = expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (restart->nanosleep.type != TT_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) error = nanosleep_copyout(restart, &it.it_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) const struct timespec64 *rqtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct restart_block *restart_block = &current->restart_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * Diagnose required errors first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (CPUCLOCK_PERTHREAD(which_clock) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) (CPUCLOCK_PID(which_clock) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) error = do_cpu_nanosleep(which_clock, flags, rqtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (error == -ERESTART_RESTARTBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (flags & TIMER_ABSTIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return -ERESTARTNOHAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) restart_block->nanosleep.clockid = which_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) set_restart_fn(restart_block, posix_cpu_nsleep_restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) clockid_t which_clock = restart_block->nanosleep.clockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) struct timespec64 t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) t = ns_to_timespec64(restart_block->nanosleep.expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
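/*
 * Editor's sketch (userspace): the restart path above from the caller's
 * side. clock_nanosleep() returns the error number directly, and an
 * interrupted relative sleep on a CPU clock is restarted by the kernel as
 * an absolute sleep on the saved expiry (posix_cpu_nsleep_restart()), so
 * a retry loop only needs to handle EINTR with TIMER_ABSTIME. Note the
 * EINVAL rule above: a thread cannot sleep on its own thread clock. A
 * single-threaded process sleeping on its own process clock would never
 * wake, since the clock stops while it sleeps, hence the busy sibling
 * thread here.
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <time.h>

static void *burn(void *arg)
{
	(void)arg;
	for (;;)
		;	/* keep the process CPU clock advancing */
	return NULL;
}

int main(void)
{
	struct timespec t;
	pthread_t th;
	int err;

	pthread_create(&th, NULL, burn, NULL);
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t);
	t.tv_sec += 1;	/* wake after one more second of process CPU time */
	do {
		err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME,
				      &t, NULL);
	} while (err == EINTR);
	return err;
}
#endif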
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) #define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) #define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static int process_cpu_clock_getres(const clockid_t which_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct timespec64 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static int process_cpu_clock_get(const clockid_t which_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct timespec64 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return posix_cpu_clock_get(PROCESS_CLOCK, tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static int process_cpu_timer_create(struct k_itimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) timer->it_clock = PROCESS_CLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return posix_cpu_timer_create(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) static int process_cpu_nsleep(const clockid_t which_clock, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) const struct timespec64 *rqtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static int thread_cpu_clock_getres(const clockid_t which_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct timespec64 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return posix_cpu_clock_getres(THREAD_CLOCK, tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static int thread_cpu_clock_get(const clockid_t which_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct timespec64 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return posix_cpu_clock_get(THREAD_CLOCK, tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static int thread_cpu_timer_create(struct k_itimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) timer->it_clock = THREAD_CLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return posix_cpu_timer_create(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) const struct k_clock clock_posix_cpu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) .clock_getres = posix_cpu_clock_getres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) .clock_set = posix_cpu_clock_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) .clock_get_timespec = posix_cpu_clock_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) .timer_create = posix_cpu_timer_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) .nsleep = posix_cpu_nsleep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) .timer_set = posix_cpu_timer_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) .timer_del = posix_cpu_timer_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) .timer_get = posix_cpu_timer_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) .timer_rearm = posix_cpu_timer_rearm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) const struct k_clock clock_process = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) .clock_getres = process_cpu_clock_getres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .clock_get_timespec = process_cpu_clock_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) .timer_create = process_cpu_timer_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) .nsleep = process_cpu_nsleep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) const struct k_clock clock_thread = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) .clock_getres = thread_cpu_clock_getres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) .clock_get_timespec = thread_cpu_clock_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) .timer_create = thread_cpu_timer_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) };