// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */
#include <linux/cpufreq_times.h>
#include "sched.h"
#include <trace/hooks/sched.h>

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with a side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
				  enum cpu_usage_stat idx)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	u64_stats_update_begin(&irqtime->sync);
	cpustat[idx] += delta;
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}
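
/*
 * Added note (not part of the original file): the u64_stats_sync sequence
 * above pairs with readers on other CPUs that retry if they race with an
 * update. A reader would, roughly, follow the usual pattern below (a
 * sketch only; in current kernels the actual helper lives in sched.h as
 * irq_time_read()):
 *
 *	unsigned int seq;
 *	u64 total;
 *
 *	do {
 *		seq = __u64_stats_fetch_begin(&irqtime->sync);
 *		total = irqtime->total;
 *	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 */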

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time, but still wants
	 * to run.
	 */
	if (hardirq_count())
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);

	trace_android_rvh_account_irq(curr, cpu, delta);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}
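
/*
 * Added worked example (not in the original file, numbers are made up):
 * if 3 ms of hardirq/softirq time accumulated in tick_delta since the
 * last tick and the tick being accounted is worth TICK_NSEC (e.g. 4 ms
 * with HZ=250), irqtime_tick_accounted(4 ms) returns 3 ms and leaves
 * tick_delta at 0, so only the remaining 1 ms is charged to the
 * interrupted task.
 */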

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 *
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cgroup_account_cputime_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);

	/* Account power usage for user time */
	cpufreq_acct_update_power(p, cputime);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		task_group_account_field(p, CPUTIME_NICE, cputime);
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		task_group_account_field(p, CPUTIME_USER, cputime);
		cpustat[CPUTIME_GUEST] += cputime;
	}
}
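
/*
 * Added note (not in the original file): guest time is charged both to
 * p->utime / CPUTIME_USER|NICE and, in parallel, to p->gtime and the
 * CPUTIME_GUEST* fields above. Consumers that want "pure" user time
 * (e.g. tools reading /proc/stat) therefore typically subtract the
 * guest columns from the user columns rather than summing all of them.
 */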

/*
 * Account system CPU time to a process and desired cpustat field
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: index of the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);

	/* Account power usage for system time */
	cpufreq_acct_update_power(p, cputime);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}
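
/*
 * Added explanatory note (not in the original file): the tick path below
 * calls this as account_system_time(p, HARDIRQ_OFFSET, cputime) because
 * the timer interrupt itself contributes one HARDIRQ_OFFSET to
 * hardirq_count(); subtracting it lets a tick that merely interrupted
 * ordinary kernel code be accounted as CPUTIME_SYSTEM rather than
 * CPUTIME_IRQ, while a tick that landed inside another hardirq still
 * shows up as irq time.
 */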

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}
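
/*
 * Added worked example (not in the original file, numbers are made up):
 * if the hypervisor's steal clock for this CPU reads 10 ms and
 * prev_steal_time is 7 ms, the new steal is 3 ms; with maxtime = 4 ms it
 * is accounted in full and prev_steal_time advances to 10 ms, so the
 * same stolen interval is never charged twice.
 */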

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	lockdep_assert_irqs_disabled();

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}
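
/*
 * Added worked example (not in the original file, numbers are made up):
 * with max = 4 ms, 1 ms of steal and 2 ms of pending irq time, the call
 * returns 1 + min(2, 4 - 1) = 3 ms; steal is consumed first and irq time
 * is clamped so the total never exceeds max.
 */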

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip
	 * accounting those pending times and rely only on values updated on
	 * tick or other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
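
/*
 * Added note on the locking pattern above (not in the original file):
 * read_seqbegin_or_lock_irqsave() starts as a lockless seqcount read when
 * the passed-in sequence number is even (nextseq = 0). If a writer raced
 * with us, need_seqretry() requests a second round with an odd sequence
 * number (nextseq = 1), which makes the helper take stats_lock for real,
 * so the loop is bounded to at most two iterations.
 */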

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 * @ticks: number of ticks being accounted
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * or softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == this_rq()->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
	trace_android_vh_irqtime_account_process_tick(p, this_rq(), user_tick, ticks);
}
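
/*
 * Added worked example (not in the original file, numbers are made up):
 * waking from a long idle period with ticks = 5 (so cputime is
 * 5 * TICK_NSEC) while 2 ticks' worth of steal/irq/softirq time is
 * pending, account_other_time() eats those 2 ticks and only the
 * remaining 3 ticks fall through to the ksoftirqd/user/idle/guest/system
 * demultiplexing above.
 */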

static void irqtime_account_idle_ticks(int ticks)
{
	irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_kernel(prev);

	vtime_flush(prev);
	arch_vtime_task_switch(prev);
}
# endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_kernel() and vtime_account_idle(). Archs that
 * assign a different meaning to idle time (s390, for example, only
 * includes the time spent by the CPU when it's in low power mode)
 * must override vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt() && is_idle_task(tsk))
		vtime_account_idle(tsk);
	else
		vtime_account_kernel(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	*ut = curr->utime;
	*st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;

	if (vtime_accounting_enabled_this_cpu())
		return;
	trace_android_vh_account_task_time(p, this_rq(), user_tick);

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}
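
/*
 * Added explanatory note (not in the original file): in the final branch
 * above, irq_count() == HARDIRQ_OFFSET means the only interrupt context
 * on the stack is the timer interrupt doing this accounting; if the idle
 * task was interrupted in that state the tick is pure idle time,
 * otherwise it is charged as system time via account_system_time().
 */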

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks to account
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *	stime + utime == rtime
 *	stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or utime are 0, assume all runtime is userspace.
	 * Once a task gets some ticks, the monotonicity code at 'update:'
	 * will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *	utime_i+1 = rtime_i+1 - stime_i
	 *	          = rtime_i+1 - (rtime_i - utime_i)
	 *	          = (rtime_i+1 - rtime_i) + utime_i
	 *	          >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}
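
/*
 * Added worked example (not in the original file, numbers are made up):
 * with tick samples stime = 2 ms, utime = 6 ms and a scheduler rtime of
 * 12 ms, the scaling step gives stime = 2 * 12 / (2 + 6) = 3 ms and
 * utime = 12 - 3 = 9 ms. If a previous call had already reported
 * prev->stime = 4 ms, stime is clamped back up to 4 ms and utime becomes
 * 8 ms, so neither value ever appears to run backwards and the pair still
 * sums to rtime.
 */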

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);

#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
	u64 delta = vtime_delta(vtime);
	u64 other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, and there is no need for steal time accounting to make
	 * up for lost ticks. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	other = account_other_time(delta);
	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
	vtime->starttime += delta;

	return delta - other;
}
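
/*
 * Added note (not in the original file): because account_other_time() is
 * capped at 'delta', the value returned above can never go negative;
 * steal and irq time consumed during the interval are simply carved out
 * of the vtime that would otherwise be charged to the task.
 */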

static void vtime_account_system(struct task_struct *tsk,
				 struct vtime *vtime)
{
	vtime->stime += get_vtime_delta(vtime);
	if (vtime->stime >= TICK_NSEC) {
		account_system_time(tsk, irq_count(), vtime->stime);
		vtime->stime = 0;
	}
}

static void vtime_account_guest(struct task_struct *tsk,
				struct vtime *vtime)
{
	vtime->gtime += get_vtime_delta(vtime);
	if (vtime->gtime >= TICK_NSEC) {
		account_guest_time(tsk, vtime->gtime);
		vtime->gtime = 0;
	}
}

static void __vtime_account_kernel(struct task_struct *tsk,
				   struct vtime *vtime)
{
	/* We might have scheduled out from guest path */
	if (vtime->state == VTIME_GUEST)
		vtime_account_guest(tsk, vtime);
	else
		vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	if (!vtime_delta(vtime))
		return;

	write_seqcount_begin(&vtime->seqcount);
	__vtime_account_kernel(tsk, vtime);
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	vtime->state = VTIME_USER;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime->utime += get_vtime_delta(vtime);
	if (vtime->utime >= TICK_NSEC) {
		account_user_time(tsk, vtime->utime);
		vtime->utime = 0;
	}
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;
	/*
	 * The flags must be updated under the lock with
	 * the vtime_starttime flush and update.
	 * That enforces the right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	tsk->flags |= PF_VCPU;
	vtime->state = VTIME_GUEST;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_guest(tsk, vtime);
	tsk->flags &= ~PF_VCPU;
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (vtime->state == VTIME_IDLE)
		vtime_account_idle(prev);
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;
	write_seqcount_end(&vtime->seqcount);

	vtime = &current->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(current))
		vtime->state = VTIME_IDLE;
	else if (current->flags & PF_VCPU)
		vtime->state = VTIME_GUEST;
	else
		vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
	write_seqcount_end(&vtime->seqcount);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
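/*
 * Set up vtime accounting for a CPU's idle task: mark it VTIME_IDLE and
 * stamp the start time and CPU so later deltas have a baseline.
 */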
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) void vtime_init_idle(struct task_struct *t, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct vtime *vtime = &t->vtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) write_seqcount_begin(&vtime->seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) vtime->state = VTIME_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) vtime->starttime = sched_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) vtime->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) write_seqcount_end(&vtime->seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
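/*
 * Return the guest time of @t, including the not yet flushed delta if
 * the task is currently running in guest context.
 */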
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) u64 task_gtime(struct task_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct vtime *vtime = &t->vtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u64 gtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (!vtime_accounting_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return t->gtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) seq = read_seqcount_begin(&vtime->seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) gtime = t->gtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (vtime->state == VTIME_GUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) gtime += vtime->gtime + vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) } while (read_seqcount_retry(&vtime->seqcount, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return gtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Fetch the raw cputime values from the task_struct fields and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * add the pending nohz execution time accumulated since the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * cputime snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct vtime *vtime = &t->vtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) u64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!vtime_accounting_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) *utime = t->utime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) *stime = t->stime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) seq = read_seqcount_begin(&vtime->seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) *utime = t->utime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) *stime = t->stime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* Task is sleeping or idle, nothing to add */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (vtime->state < VTIME_SYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) delta = vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Task runs either in user (including guest) or kernel space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * add pending nohz time to the right place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (vtime->state == VTIME_SYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) *stime += vtime->stime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) *utime += vtime->utime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) } while (read_seqcount_retry(&vtime->seqcount, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
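
/*
 * Example (hypothetical caller, not part of this file): sampling a
 * task's user/system split with the pending nohz delta folded in:
 *
 *	u64 utime, stime;
 *
 *	task_cputime(p, &utime, &stime);
 *
 * where "p" is any valid struct task_struct pointer.
 */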
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
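/*
 * Sample the vtime state of the task believed to be running on @cpu.
 * Returns the state, or -EAGAIN if we raced with a context switch:
 * either the task is no longer the one running on @cpu, or it hasn't
 * flushed its pending cputime yet. The caller should retry.
 */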
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static int vtime_state_fetch(struct vtime *vtime, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int state = READ_ONCE(vtime->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * We raced against a context switch; fetch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * kcpustat task again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (vtime->cpu != cpu && vtime->cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * Two possible things here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * 1) We are seeing the scheduling-out task (prev) or any past one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * 2) We are seeing the scheduling-in task (next) but it hasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) *    passed through vtime_task_switch() yet, so the pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * cputime of the prev task may not be flushed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (state == VTIME_INACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
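/*
 * Pending user-side delta of @vtime: utime when running in user space,
 * gtime when running in guest context, 0 otherwise.
 */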
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static u64 kcpustat_user_vtime(struct vtime *vtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (vtime->state == VTIME_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return vtime->utime + vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) else if (vtime->state == VTIME_GUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return vtime->gtime + vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
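/*
 * Compute one cpustat field for @cpu, folding in the pending vtime of
 * @tsk (the task currently running there). Returns -EAGAIN if @tsk is
 * concurrently switching out, in which case the caller must retry with
 * a fresh rq->curr.
 */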
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static int kcpustat_field_vtime(u64 *cpustat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) enum cpu_usage_stat usage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int cpu, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct vtime *vtime = &tsk->vtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) seq = read_seqcount_begin(&vtime->seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) state = vtime_state_fetch(vtime, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (state < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) *val = cpustat[usage];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * Nice vs. unnice cputime accounting may be inaccurate if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * the nice value has changed since the last vtime update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * But a proper fix would involve interrupting the target on nice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * updates, which is a no-go on nohz_full (although the scheduler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * may still interrupt the target if rescheduling is needed...).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) switch (usage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) case CPUTIME_SYSTEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (state == VTIME_SYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) *val += vtime->stime + vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) case CPUTIME_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (task_nice(tsk) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) *val += kcpustat_user_vtime(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) case CPUTIME_NICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (task_nice(tsk) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) *val += kcpustat_user_vtime(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) case CPUTIME_GUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (state == VTIME_GUEST && task_nice(tsk) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) *val += vtime->gtime + vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case CPUTIME_GUEST_NICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (state == VTIME_GUEST && task_nice(tsk) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) *val += vtime->gtime + vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) } while (read_seqcount_retry(&vtime->seqcount, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) u64 kcpustat_field(struct kernel_cpustat *kcpustat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) enum cpu_usage_stat usage, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) u64 *cpustat = kcpustat->cpustat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) u64 val = cpustat[usage];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (!vtime_accounting_enabled_cpu(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct task_struct *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) curr = rcu_dereference(rq->curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (WARN_ON_ONCE(!curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return cpustat[usage];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) EXPORT_SYMBOL_GPL(kcpustat_field);
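
/*
 * Example (hypothetical caller, not part of this file): reading the
 * system time accumulated on CPU 3, with the delta of the task
 * currently running there folded in:
 *
 *	u64 sys = kcpustat_field(&kcpustat_cpu(3), CPUTIME_SYSTEM, 3);
 */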
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
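/*
 * Copy *src into *dst and fold the pending vtime of @tsk (running on
 * @cpu) into the matching cpustat fields. Returns -EAGAIN if @tsk is
 * concurrently switching out and the snapshot must be retried.
 */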
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) const struct kernel_cpustat *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct task_struct *tsk, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct vtime *vtime = &tsk->vtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) u64 *cpustat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) u64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) seq = read_seqcount_begin(&vtime->seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) state = vtime_state_fetch(vtime, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (state < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) *dst = *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) cpustat = dst->cpustat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* Task is sleeping, dead or idle, nothing to add */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (state < VTIME_SYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) delta = vtime_delta(vtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * Task runs either in user (including guest) or kernel space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * add pending nohz time to the right place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (state == VTIME_SYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) } else if (state == VTIME_USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (task_nice(tsk) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) cpustat[CPUTIME_NICE] += vtime->utime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) cpustat[CPUTIME_USER] += vtime->utime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) WARN_ON_ONCE(state != VTIME_GUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (task_nice(tsk) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) cpustat[CPUTIME_NICE] += vtime->gtime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) cpustat[CPUTIME_USER] += vtime->gtime + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) } while (read_seqcount_retry(&vtime->seqcount, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (!vtime_accounting_enabled_cpu(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) *dst = *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct task_struct *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) curr = rcu_dereference(rq->curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (WARN_ON_ONCE(!curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) *dst = *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
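
/*
 * Example (hypothetical caller, not part of this file): taking a
 * coherent snapshot of CPU 0's cputime counters, including the running
 * task's pending nohz delta:
 *
 *	struct kernel_cpustat snap;
 *
 *	kcpustat_cpu_fetch(&snap, 0);
 */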
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */