// SPDX-License-Identifier: GPL-2.0
/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */
#include <asm/irq_regs.h>
#include "sched.h"

/* Time spent by the tasks of the CPU accounting group executing in ... */
enum cpuacct_stat_index {
        CPUACCT_STAT_USER,      /* ... user mode */
        CPUACCT_STAT_SYSTEM,    /* ... kernel mode */

        CPUACCT_STAT_NSTATS,
};

static const char * const cpuacct_stat_desc[] = {
        [CPUACCT_STAT_USER] = "user",
        [CPUACCT_STAT_SYSTEM] = "system",
};

struct cpuacct_usage {
        u64 usages[CPUACCT_STAT_NSTATS];
};

/* track CPU usage of a group of tasks and its child groups */
struct cpuacct {
        struct cgroup_subsys_state css;
        /* cpuusage holds a pointer to one struct cpuacct_usage per CPU */
        struct cpuacct_usage __percpu *cpuusage;
        struct kernel_cpustat __percpu *cpustat;
};

static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct cpuacct, css) : NULL;
}

/* Return the CPU accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
        return css_ca(task_css(tsk, cpuacct_cgrp_id));
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
        return css_ca(ca->css.parent);
}

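/*
 * The root accounting group is statically allocated: cpuacct_css_alloc()
 * hands it out when there is no parent css, and its ->cpustat aliases the
 * global kernel_cpustat, so the controller is usable from early init
 * (see .early_init below) without any dynamic allocation.
 */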
static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
static struct cpuacct root_cpuacct = {
        .cpustat = &kernel_cpustat,
        .cpuusage = &root_cpuacct_cpuusage,
};

/* Create a new CPU accounting group */
static struct cgroup_subsys_state *
cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct cpuacct *ca;

        if (!parent_css)
                return &root_cpuacct.css;

        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
                goto out;

        ca->cpuusage = alloc_percpu(struct cpuacct_usage);
        if (!ca->cpuusage)
                goto out_free_ca;

        ca->cpustat = alloc_percpu(struct kernel_cpustat);
        if (!ca->cpustat)
                goto out_free_cpuusage;

        return &ca->css;

out_free_cpuusage:
        free_percpu(ca->cpuusage);
out_free_ca:
        kfree(ca);
out:
        return ERR_PTR(-ENOMEM);
}

/* Destroy an existing CPU accounting group */
static void cpuacct_css_free(struct cgroup_subsys_state *css)
{
        struct cpuacct *ca = css_ca(css);

        free_percpu(ca->cpustat);
        free_percpu(ca->cpuusage);
        kfree(ca);
}

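/*
 * Read one usage counter of @ca on @cpu, or the sum of all of them when
 * @index is CPUACCT_STAT_NSTATS.
 */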
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
                                 enum cpuacct_stat_index index)
{
        struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;

        /*
         * We allow index == CPUACCT_STAT_NSTATS here to read
         * the sum of usages.
         */
        BUG_ON(index > CPUACCT_STAT_NSTATS);

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

        if (index == CPUACCT_STAT_NSTATS) {
                int i;

                data = 0;
                for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
                        data += cpuusage->usages[i];
        } else {
                data = cpuusage->usages[index];
        }

#ifndef CONFIG_64BIT
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif

        return data;
}

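/*
 * Set every usage counter of @ca on @cpu to @val.  The only caller,
 * cpuusage_write(), passes 0 to reset the counters.
 */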
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
        struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        int i;

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

        for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
                cpuusage->usages[i] = val;

#ifndef CONFIG_64BIT
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif
}

/*
 * Return the CPU usage (in nanoseconds) of a group for @index, summed over
 * all possible CPUs; CPUACCT_STAT_NSTATS yields the grand total.
 */
static u64 __cpuusage_read(struct cgroup_subsys_state *css,
                           enum cpuacct_stat_index index)
{
        struct cpuacct *ca = css_ca(css);
        u64 totalcpuusage = 0;
        int i;

        for_each_possible_cpu(i)
                totalcpuusage += cpuacct_cpuusage_read(ca, i, index);

        return totalcpuusage;
}

static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
                              struct cftype *cft)
{
        return __cpuusage_read(css, CPUACCT_STAT_USER);
}

static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
                             struct cftype *cft)
{
        return __cpuusage_read(css, CPUACCT_STAT_SYSTEM);
}

static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
        return __cpuusage_read(css, CPUACCT_STAT_NSTATS);
}

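/*
 * Writing to the usage file resets the counters on every possible CPU;
 * only the value 0 is accepted.
 */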
static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
                          u64 val)
{
        struct cpuacct *ca = css_ca(css);
        int cpu;

        /*
         * Only allow '0' here to do a reset.
         */
        if (val)
                return -EINVAL;

        for_each_possible_cpu(cpu)
                cpuacct_cpuusage_write(ca, cpu, 0);

        return 0;
}

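/* Print one space-separated usage value per possible CPU on a single line */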
static int __cpuacct_percpu_seq_show(struct seq_file *m,
                                     enum cpuacct_stat_index index)
{
        struct cpuacct *ca = css_ca(seq_css(m));
        u64 percpu;
        int i;

        for_each_possible_cpu(i) {
                percpu = cpuacct_cpuusage_read(ca, i, index);
                seq_printf(m, "%llu ", (unsigned long long) percpu);
        }
        seq_printf(m, "\n");
        return 0;
}

static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
{
        return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_USER);
}

static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
{
        return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_SYSTEM);
}

static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
{
        return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_NSTATS);
}

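/*
 * usage_all: print a header line ("cpu user system") followed by one row
 * per possible CPU with its raw per-stat counters in nanoseconds.
 */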
static int cpuacct_all_seq_show(struct seq_file *m, void *V)
{
        struct cpuacct *ca = css_ca(seq_css(m));
        int index;
        int cpu;

        seq_puts(m, "cpu");
        for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
                seq_printf(m, " %s", cpuacct_stat_desc[index]);
        seq_puts(m, "\n");

        for_each_possible_cpu(cpu) {
                struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

                seq_printf(m, "%d", cpu);

                for (index = 0; index < CPUACCT_STAT_NSTATS; index++) {
#ifndef CONFIG_64BIT
                        /*
                         * Take rq->lock to make 64-bit read safe on 32-bit
                         * platforms.
                         */
                        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

                        seq_printf(m, " %llu", cpuusage->usages[index]);

#ifndef CONFIG_64BIT
                        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif
                }
                seq_puts(m, "\n");
        }
        return 0;
}

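/*
 * cpuacct.stat: aggregate user and system time of the group across all
 * possible CPUs.  "nice" time is counted as user; irq and softirq time are
 * counted as system.  Values are reported in USER_HZ via nsec_to_clock_t().
 */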
static int cpuacct_stats_show(struct seq_file *sf, void *v)
{
        struct cpuacct *ca = css_ca(seq_css(sf));
        s64 val[CPUACCT_STAT_NSTATS];
        int cpu;
        int stat;

        memset(val, 0, sizeof(val));
        for_each_possible_cpu(cpu) {
                u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;

                val[CPUACCT_STAT_USER] += cpustat[CPUTIME_USER];
                val[CPUACCT_STAT_USER] += cpustat[CPUTIME_NICE];
                val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SYSTEM];
                val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_IRQ];
                val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SOFTIRQ];
        }

        for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
                seq_printf(sf, "%s %lld\n",
                           cpuacct_stat_desc[stat],
                           (long long)nsec_to_clock_t(val[stat]));
        }

        return 0;
}

static struct cftype files[] = {
        {
                .name = "usage",
                .read_u64 = cpuusage_read,
                .write_u64 = cpuusage_write,
        },
        {
                .name = "usage_user",
                .read_u64 = cpuusage_user_read,
        },
        {
                .name = "usage_sys",
                .read_u64 = cpuusage_sys_read,
        },
        {
                .name = "usage_percpu",
                .seq_show = cpuacct_percpu_seq_show,
        },
        {
                .name = "usage_percpu_user",
                .seq_show = cpuacct_percpu_user_seq_show,
        },
        {
                .name = "usage_percpu_sys",
                .seq_show = cpuacct_percpu_sys_seq_show,
        },
        {
                .name = "usage_all",
                .seq_show = cpuacct_all_seq_show,
        },
        {
                .name = "stat",
                .seq_show = cpuacct_stats_show,
        },
        { }     /* terminate */
};
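
/*
 * The files above appear as "cpuacct.<name>" in each group's directory of
 * the cpuacct controller.  A minimal illustrative shell session (the v1
 * mount point and the group name "mygrp" are assumptions, the numbers are
 * made up):
 *
 *   # cat /sys/fs/cgroup/cpuacct/mygrp/cpuacct.usage
 *   123456789
 *   # cat /sys/fs/cgroup/cpuacct/mygrp/cpuacct.stat
 *   user 4242
 *   system 1717
 *   # echo 0 > /sys/fs/cgroup/cpuacct/mygrp/cpuacct.usage    # reset counters
 */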
/*
 * Charge this task's execution time to its accounting group, walking the
 * hierarchy up to and including the root group.
 *
 * Called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
        struct cpuacct *ca;
        int index = CPUACCT_STAT_SYSTEM;
        struct pt_regs *regs = get_irq_regs() ? : task_pt_regs(tsk);

        if (regs && user_mode(regs))
                index = CPUACCT_STAT_USER;

        rcu_read_lock();

        for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
                __this_cpu_add(ca->cpuusage->usages[index], cputime);

        rcu_read_unlock();
}

/*
 * Add user/system time to cpuacct.
 *
 * Note: it's the caller that updates the account of the root cgroup.
 */
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
{
        struct cpuacct *ca;

        rcu_read_lock();
        for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
                __this_cpu_add(ca->cpustat->cpustat[index], val);
        rcu_read_unlock();
}

struct cgroup_subsys cpuacct_cgrp_subsys = {
        .css_alloc = cpuacct_css_alloc,
        .css_free = cpuacct_css_free,
        .legacy_cftypes = files,
        .early_init = true,
};