// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);

static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}

/**
 * cgroup_rstat_updated - keep track of updated rstat_cpu
 * @cgrp: target cgroup
 * @cpu: cpu on which rstat_cpu was updated
 *
 * @cgrp's rstat_cpu on @cpu was updated. Put it on the parent's matching
 * rstat_cpu->updated_children list. See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
	struct cgroup *parent;
	unsigned long flags;

	/* nothing to do for root */
	if (!cgroup_parent(cgrp))
		return;

	/*
	 * Speculative already-on-list test. This may race leading to
	 * temporary inaccuracies, which is fine.
	 *
	 * Because @parent's updated_children is terminated with @parent
	 * instead of NULL, we can tell whether @cgrp is on the list by
	 * testing the next pointer for NULL.
	 */
	if (cgroup_rstat_cpu(cgrp, cpu)->updated_next)
		return;

	raw_spin_lock_irqsave(cpu_lock, flags);

	/* put @cgrp and all ancestors on the corresponding updated lists */
	for (parent = cgroup_parent(cgrp); parent;
	     cgrp = parent, parent = cgroup_parent(cgrp)) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);

		/*
		 * Both additions and removals are bottom-up. If a cgroup
		 * is already in the tree, all ancestors are.
		 */
		if (rstatc->updated_next)
			break;

		rstatc->updated_next = prstatc->updated_children;
		prstatc->updated_children = cgrp;
	}

	raw_spin_unlock_irqrestore(cpu_lock, flags);
}
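
/*
 * Illustrative sketch of the per-cpu updated tree built above; the
 * hierarchy and names are invented for explanation, and the fields shown
 * are the rstat_cpu ones for @cpu. With root -> A -> B and B updated:
 *
 *	root->updated_children == A,	A->updated_next == root
 *	A->updated_children    == B,	B->updated_next == A
 *	B->updated_children    == B	(points to self: no updated children)
 *
 * An empty updated_children list points at its owning cgroup, and the
 * last entry of a list points at the parent instead of NULL, which is
 * what makes the "updated_next != NULL means already queued" test above
 * work.
 */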

/**
 * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
 * @pos: current position
 * @root: root of the tree to traverse
 * @cpu: target cpu
 *
 * Walks the updated rstat_cpu tree on @cpu from @root. %NULL @pos starts
 * the traversal and %NULL return indicates the end. During traversal,
 * each returned cgroup is unlinked from the tree. Must be called with the
 * matching cgroup_rstat_cpu_lock held.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, if a child is visited, its parent is
 * guaranteed to be visited afterwards.
 */
static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
						   struct cgroup *root, int cpu)
{
	struct cgroup_rstat_cpu *rstatc;

	if (pos == root)
		return NULL;

	/*
	 * We're gonna walk down to the first leaf and visit/remove it. We
	 * can pick whatever unvisited node as the starting point.
	 */
	if (!pos)
		pos = root;
	else
		pos = cgroup_parent(pos);

	/* walk down to the first leaf */
	while (true) {
		rstatc = cgroup_rstat_cpu(pos, cpu);
		if (rstatc->updated_children == pos)
			break;
		pos = rstatc->updated_children;
	}

	/*
	 * Unlink @pos from the tree. As the updated_children list is
	 * singly linked, we have to walk it to find the removal point.
	 * However, due to the way we traverse, @pos will be the first
	 * child in most cases. The only exception is @root.
	 */
	if (rstatc->updated_next) {
		struct cgroup *parent = cgroup_parent(pos);
		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
		struct cgroup_rstat_cpu *nrstatc;
		struct cgroup **nextp;

		nextp = &prstatc->updated_children;
		while (true) {
			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
			if (*nextp == pos)
				break;

			WARN_ON_ONCE(*nextp == parent);
			nextp = &nrstatc->updated_next;
		}

		*nextp = rstatc->updated_next;
		rstatc->updated_next = NULL;

		return pos;
	}

	/* only happens for @root */
	return NULL;
}
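
/*
 * Example walk, for illustration only: with the root -> A -> B hierarchy
 * sketched above and B updated on @cpu, flushing from root pops B first
 * (walk down to the leaf), then A, then returns NULL. Each pop unlinks
 * the returned cgroup, so a child is always handed to the flusher before
 * its parent and the per-cpu tree is empty once the walk finishes.
 */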

/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
	int cpu;

	lockdep_assert_held(&cgroup_rstat_lock);

	for_each_possible_cpu(cpu) {
		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
						       cpu);
		struct cgroup *pos = NULL;

		raw_spin_lock(cpu_lock);
		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
			struct cgroup_subsys_state *css;

			cgroup_base_stat_flush(pos, cpu);

			rcu_read_lock();
			list_for_each_entry_rcu(css, &pos->rstat_css_list,
						rstat_css_node)
				css->ss->css_rstat_flush(css, cpu);
			rcu_read_unlock();
		}
		raw_spin_unlock(cpu_lock);

		/* if @may_sleep, play nice and yield if necessary */
		if (may_sleep && (need_resched() ||
				  spin_needbreak(&cgroup_rstat_lock))) {
			spin_unlock_irq(&cgroup_rstat_lock);
			if (!cond_resched())
				cpu_relax();
			spin_lock_irq(&cgroup_rstat_lock);
		}
	}
}

/**
 * cgroup_rstat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards. After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 *
 * This function may block.
 */
void cgroup_rstat_flush(struct cgroup *cgrp)
{
	might_sleep();

	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
	spin_unlock_irq(&cgroup_rstat_lock);
}
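
/*
 * Hedged usage sketch, not taken from the kernel sources: a controller
 * that accumulates per-cpu counters and reports a total on read could do
 *
 *	static u64 my_counter_read(struct cgroup *cgrp)
 *	{
 *		cgroup_rstat_flush(cgrp);		// may sleep
 *		return cgrp_to_my_state(cgrp)->total;	// hypothetical helper
 *	}
 *
 * where my_counter_read(), cgrp_to_my_state() and ->total are assumed
 * names. A reader that cannot sleep would call
 * cgroup_rstat_flush_irqsafe() below instead.
 */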

/**
 * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
 * @cgrp: target cgroup
 *
 * This function can be called from any context.
 */
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
{
	unsigned long flags;

	spin_lock_irqsave(&cgroup_rstat_lock, flags);
	cgroup_rstat_flush_locked(cgrp, false);
	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
}

/**
 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
 * @cgrp: target cgroup
 *
 * Flush stats in @cgrp's subtree and prevent further flushes. Must be
 * paired with cgroup_rstat_flush_release().
 *
 * This function may block.
 */
void cgroup_rstat_flush_hold(struct cgroup *cgrp)
	__acquires(&cgroup_rstat_lock)
{
	might_sleep();
	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
}

/**
 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
 */
void cgroup_rstat_flush_release(void)
	__releases(&cgroup_rstat_lock)
{
	spin_unlock_irq(&cgroup_rstat_lock);
}
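
/*
 * Sketch of the hold/release pairing; cgroup_base_stat_cputime_show()
 * below is the in-tree user this mirrors. Holding cgroup_rstat_lock
 * across the reads keeps a multi-field snapshot consistent against
 * concurrent flushes:
 *
 *	cgroup_rstat_flush_hold(cgrp);
 *	usage = cgrp->bstat.cputime.sum_exec_runtime;
 *	utime = cgrp->bstat.cputime.utime;
 *	cgroup_rstat_flush_release();
 */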

int cgroup_rstat_init(struct cgroup *cgrp)
{
	int cpu;

	/* the root cgrp has rstat_cpu preallocated */
	if (!cgrp->rstat_cpu) {
		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
		if (!cgrp->rstat_cpu)
			return -ENOMEM;
	}

	/* ->updated_children list is self-terminated */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		rstatc->updated_children = cgrp;
		u64_stats_init(&rstatc->bsync);
	}

	return 0;
}

void cgroup_rstat_exit(struct cgroup *cgrp)
{
	int cpu;

	cgroup_rstat_flush(cgrp);

	/* sanity check */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
		    WARN_ON_ONCE(rstatc->updated_next))
			return;
	}

	free_percpu(cgrp->rstat_cpu);
	cgrp->rstat_cpu = NULL;
}

void __init cgroup_rstat_boot(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));

	BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
}

/*
 * Functions for cgroup basic resource statistics implemented on top of
 * rstat.
 */
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime += src_bstat->cputime.utime;
	dst_bstat->cputime.stime += src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
	struct cgroup_base_stat cur, delta;
	unsigned seq;

	/* fetch the current per-cpu values */
	do {
		seq = __u64_stats_fetch_begin(&rstatc->bsync);
		cur.cputime = rstatc->bstat.cputime;
	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

	/* propagate percpu delta to global */
	delta = cur;
	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
	cgroup_base_stat_add(&cgrp->bstat, &delta);
	cgroup_base_stat_add(&rstatc->last_bstat, &delta);

	/* propagate global delta to parent */
	if (parent) {
		delta = cgrp->bstat;
		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
		cgroup_base_stat_add(&parent->bstat, &delta);
		cgroup_base_stat_add(&cgrp->last_bstat, &delta);
	}
}
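
/*
 * Worked example of the double-delta scheme above, numbers invented for
 * illustration: if this cpu's rstatc->bstat.cputime.sum_exec_runtime
 * reads 100 while rstatc->last_bstat holds 60, the percpu delta is 40,
 * so 40 is added to cgrp->bstat and last_bstat becomes 100. If
 * cgrp->bstat then totals 500 against cgrp->last_bstat of 450 (the extra
 * 10 having arrived from already-flushed children), the global delta of
 * 50 is added to the parent and cgrp->last_bstat becomes 500. The "last
 * seen" copies guarantee each unit of time is propagated upward exactly
 * once.
 */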

static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
	u64_stats_update_begin(&rstatc->bsync);
	return rstatc;
}

static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
						 struct cgroup_rstat_cpu *rstatc)
{
	u64_stats_update_end(&rstatc->bsync);
	cgroup_rstat_updated(cgrp, smp_processor_id());
	put_cpu_ptr(rstatc);
}

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
	cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp);

	switch (index) {
	case CPUTIME_USER:
	case CPUTIME_NICE:
		rstatc->bstat.cputime.utime += delta_exec;
		break;
	case CPUTIME_SYSTEM:
	case CPUTIME_IRQ:
	case CPUTIME_SOFTIRQ:
		rstatc->bstat.cputime.stime += delta_exec;
		break;
	default:
		break;
	}

	cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}
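
/*
 * Hedged caller sketch (an assumption; the real callers live on the
 * scheduler side, not in this file): charging a tick of system time to
 * @task's cgroup would look roughly like
 *
 *	__cgroup_account_cputime_field(task_dfl_cgroup(task),
 *				       CPUTIME_SYSTEM, TICK_NSEC);
 *
 * Note how CPUTIME_NICE is folded into utime and CPUTIME_IRQ/SOFTIRQ
 * into stime above; root_cgroup_cputime() below categorizes the global
 * kcpustat fields the same way.
 */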

/*
 * Compute the cputime for the root cgroup by fetching the per-cpu data
 * at the global level, then categorizing the fields the same way
 * __cgroup_account_cputime_field() does for each bit of cpu time
 * attributed to a cgroup.
 */
static void root_cgroup_cputime(struct task_cputime *cputime)
{
	int i;

	cputime->stime = 0;
	cputime->utime = 0;
	cputime->sum_exec_runtime = 0;
	for_each_possible_cpu(i) {
		struct kernel_cpustat kcpustat;
		u64 *cpustat = kcpustat.cpustat;
		u64 user = 0;
		u64 sys = 0;

		kcpustat_cpu_fetch(&kcpustat, i);

		user += cpustat[CPUTIME_USER];
		user += cpustat[CPUTIME_NICE];
		cputime->utime += user;

		sys += cpustat[CPUTIME_SYSTEM];
		sys += cpustat[CPUTIME_IRQ];
		sys += cpustat[CPUTIME_SOFTIRQ];
		cputime->stime += sys;

		cputime->sum_exec_runtime += user;
		cputime->sum_exec_runtime += sys;
		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
	}
}

void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;
	u64 usage, utime, stime;
	struct task_cputime cputime;

	if (cgroup_parent(cgrp)) {
		cgroup_rstat_flush_hold(cgrp);
		usage = cgrp->bstat.cputime.sum_exec_runtime;
		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
			       &utime, &stime);
		cgroup_rstat_flush_release();
	} else {
		root_cgroup_cputime(&cputime);
		usage = cputime.sum_exec_runtime;
		utime = cputime.utime;
		stime = cputime.stime;
	}

	do_div(usage, NSEC_PER_USEC);
	do_div(utime, NSEC_PER_USEC);
	do_div(stime, NSEC_PER_USEC);

	seq_printf(seq, "usage_usec %llu\n"
		   "user_usec %llu\n"
		   "system_usec %llu\n",
		   usage, utime, stime);
}