^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Do not use in new code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #ifndef _BLK_CGROUP_RWSTAT_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #define _BLK_CGROUP_RWSTAT_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/blk-cgroup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/* Indices into blkg_rwstat counters; selected from the request op flags. */
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,	/* reads (neither write nor discard) */
	BLKG_RWSTAT_WRITE,	/* writes */
	BLKG_RWSTAT_SYNC,	/* sync IOs, counted in addition to direction */
	BLKG_RWSTAT_ASYNC,	/* async IOs, counted in addition to direction */
	BLKG_RWSTAT_DISCARD,	/* discards */

	BLKG_RWSTAT_NR,		/* number of counters per rwstat */
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children.
 */
struct blkg_rwstat {
	struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];	/* live per-cpu counts */
	atomic64_t aux_cnt[BLKG_RWSTAT_NR];	/* carried-over counts (dead children) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
/* Plain snapshot of a blkg_rwstat's counters, one u64 per type. */
struct blkg_rwstat_sample {
	u64 cnt[BLKG_RWSTAT_NR];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) return atomic64_read(&rwstat->aux_cnt[idx]) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
/* Allocate/free the per-cpu counters backing @rwstat. */
int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);
void blkg_rwstat_exit(struct blkg_rwstat *rwstat);
/* Print a pre-sampled rwstat to @sf. */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat);
/* prfill callback: print the rwstat at offset @off within @pd. */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
/* Sum the rwstat at @off over @blkg and its descendants into @sum. */
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
		int off, struct blkg_rwstat_sample *sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * blkg_rwstat_add - add a value to a blkg_rwstat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * @rwstat: target blkg_rwstat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * @op: REQ_OP and flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * @val: value to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Add @val to @rwstat. The counters are chosen according to @rw. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * caller is responsible for synchronizing calls to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) unsigned int op, uint64_t val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) struct percpu_counter *cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) if (op_is_discard(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) else if (op_is_write(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (op_is_sync(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * blkg_rwstat_read - read the current values of a blkg_rwstat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * @rwstat: blkg_rwstat to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * Read the current snapshot of @rwstat and return it in the aux counts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) struct blkg_rwstat_sample *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) for (i = 0; i < BLKG_RWSTAT_NR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) result->cnt[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * blkg_rwstat_total - read the total count of a blkg_rwstat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * @rwstat: blkg_rwstat to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * Return the total count of @rwstat regardless of the IO direction. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * function can be called without synchronization and takes care of u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * atomicity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct blkg_rwstat_sample tmp = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) blkg_rwstat_read(rwstat, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * blkg_rwstat_reset - reset a blkg_rwstat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * @rwstat: blkg_rwstat to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) for (i = 0; i < BLKG_RWSTAT_NR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) percpu_counter_set(&rwstat->cpu_cnt[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) atomic64_set(&rwstat->aux_cnt[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) * @to: the destination blkg_rwstat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * @from: the source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * Add @from's count including the aux one to @to's aux count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) struct blkg_rwstat *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) u64 sum[BLKG_RWSTAT_NR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) for (i = 0; i < BLKG_RWSTAT_NR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) for (i = 0; i < BLKG_RWSTAT_NR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) &to->aux_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #endif /* _BLK_CGROUP_RWSTAT_H */