// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if a bfqq got a
	 * new request in the parent group and moved to this group while being
	 * added to the service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (bfqq != ((struct bfq_data *)bfqg->bfqd)->in_service_queue)
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy,
 * making it possible to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

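/* Return the parent group of @bfqg, or NULL if @bfqg is the root group. */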
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

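/*
 * Return the group @bfqq is served in; a queue with no parent entity
 * belongs to the root group.
 */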
struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

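/*
 * bfqg->ref is a plain, non-atomic counter: both helpers below are
 * assumed to be called with the scheduler lock (bfqd->lock) held.
 */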
static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

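/*
 * Account the size and type of @rq in the per-group bytes/ios counters
 * that back the legacy bfq.* statistics files.
 */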
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

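/* Free the percpu counters backing @stats (counterpart of bfqg_stats_init). */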
static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		return -ENOMEM;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

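/*
 * Set the default weight for a newly created blkcg: CGROUP_WEIGHT_DFL on
 * the unified (v2) hierarchy, BFQ_WEIGHT_LEGACY_DFL on the legacy one.
 */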
static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

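/*
 * Allocate a bfq_group on the same node as @q, along with its stats
 * counters, and take the initial private reference (dropped by
 * bfq_pd_free via bfqg_put).
 */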
static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

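/*
 * Hook @bfqg into BFQ's private hierarchy as a child of @parent: its
 * entity is then scheduled within the parent's sched_data.
 */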
static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

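/*
 * Look up the bfq_group associated with (@bfqd, @blkcg); return NULL if
 * the corresponding blkg does not exist yet.
 */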
static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * oom_bfqq is not allowed to move: it holds a reference to the
	 * root_group until the elevator exits.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;
	/*
	 * Get extra reference to prevent bfqq from being freed in
	 * next possible expire or deactivate.
	 */
	bfqq->ref++;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release extra ref taken above, bfqq may happen to be freed now */
	bfq_put_queue(bfqq);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @blkcg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and get a reference to it, reducing the lookup time
 * here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_release_process_ref(bfqd, async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = __bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * Thanks to the last fact, and to the fact that: (1) bfqg has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * been obtained through a blkg_lookup in the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * assignment, and (2) bfqd->lock is being held, here we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * safely use the policy data for the involved blkg (i.e., the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * field bfqg->pd) to get to the blkg associated with bfqg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * and then we can safely use any field of blkg. After we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * release bfqd->lock, even just getting blkg through this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * bfqg may cause dangling references to be traversed, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * bfqg->pd may not exist any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * In view of the above facts, here we cache, in the bfqg, any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * blkg data we may need for this bic, and for its associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * bfq_queue. As of now, we need to cache only the path of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * blkg, which is used in the bfq_log_* functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * Finally, note that bfqg itself needs to be protected from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * destruction on the blkg_free of the original blkg (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * invokes bfq_pd_free). We use an additional private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * refcounter for bfqg, to let it disappear only after no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * bfq_queue refers to it any longer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) bic->blkcg_serial_nr = serial_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
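
#if 0
/*
 * Illustrative userspace sketch, excluded from the build: the caching
 * pattern described above, reduced to its essentials (all names are
 * hypothetical). The path is copied into private storage while a lock
 * pins the source object; later logging reads only the private copy,
 * so it never follows pointers into an object that blk-cgroup may have
 * destroyed in the meantime. That is exactly why bfq_log_* can use
 * bfqg->blkg_path without holding bfqd->lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct group {
	char cached_path[128];
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void cache_path(struct group *g, const char *live_path)
{
	pthread_mutex_lock(&lock);	/* source known to be alive here */
	strncpy(g->cached_path, live_path, sizeof(g->cached_path) - 1);
	g->cached_path[sizeof(g->cached_path) - 1] = '\0';
	pthread_mutex_unlock(&lock);
}

static void log_group(const struct group *g, const char *msg)
{
	/* safe after the lock is dropped: only the copy is read */
	printf("%s: %s\n", g->cached_path, msg);
}
#endif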
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * @st: the service tree being flushed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static void bfq_flush_idle_tree(struct bfq_service_tree *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct bfq_entity *entity = st->first_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) for (; entity; entity = st->first_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) __bfq_deactivate_entity(entity, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * bfq_reparent_leaf_entity - move leaf entity to the root_group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * @bfqd: the device data structure with the root group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * @entity: the entity to move, if entity is a leaf; or the parent entity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * of an active leaf entity to move, if entity is not a leaf.
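* @ioprio_class: the I/O priority class selecting which service tree to follow while descending.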
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct bfq_entity *entity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int ioprio_class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct bfq_queue *bfqq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct bfq_entity *child_entity = entity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) while (child_entity->my_sched_data) { /* leaf not reached yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct bfq_sched_data *child_sd = child_entity->my_sched_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct bfq_service_tree *child_st = child_sd->service_tree +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ioprio_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct rb_root *child_active = &child_st->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) child_entity = bfq_entity_of(rb_first(child_active));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!child_entity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) child_entity = child_sd->in_service_entity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) bfqq = bfq_entity_to_bfqq(child_entity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * bfq_reparent_active_queues - move all active queues to the root group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * @bfqd: the device data structure with the root group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * @bfqg: the group to move from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * @st: the service tree to start the search from.
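* @ioprio_class: the I/O priority class of @st, reused when descending into the service trees of child groups.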
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static void bfq_reparent_active_queues(struct bfq_data *bfqd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct bfq_group *bfqg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct bfq_service_tree *st,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) int ioprio_class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct rb_root *active = &st->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct bfq_entity *entity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) while ((entity = bfq_entity_of(rb_first(active))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (bfqg->sched_data.in_service_entity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) bfq_reparent_leaf_entity(bfqd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) bfqg->sched_data.in_service_entity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ioprio_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * bfq_pd_offline - deactivate the entity associated with @pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * and reparent its child entities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * @pd: descriptor of the policy going offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * blkio already grabs the queue_lock for us, so there is no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * use RCU-based magic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static void bfq_pd_offline(struct blkg_policy_data *pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct bfq_service_tree *st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct bfq_group *bfqg = pd_to_bfqg(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct bfq_data *bfqd = bfqg->bfqd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct bfq_entity *entity = bfqg->my_entity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) spin_lock_irqsave(&bfqd->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (!entity) /* root group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto put_async_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * Empty all service_trees belonging to this group before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * deactivating the group itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) st = bfqg->sched_data.service_tree + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * It may happen that some queues are still active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * (busy) upon group destruction (if the corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * processes have been forced to terminate). We move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * all the leaf entities corresponding to these queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * to the root_group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * Also, it may happen that the group has an entity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * in service, which is disconnected from the active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * tree: it must be moved, too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * There is no need to put the sync queues, as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * scheduler has taken no reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) bfq_reparent_active_queues(bfqd, bfqg, st, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * The idle tree may still contain bfq_queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * belonging to exited tasks because they never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * migrated to a different cgroup from the one being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * destroyed now. In addition, even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * bfq_reparent_active_queues() may happen to add some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * entities to the idle tree. This happens if, in some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * of the calls to bfq_bfqq_move() performed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * bfq_reparent_active_queues(), the queue to move is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * empty and gets expired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) bfq_flush_idle_tree(st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) __bfq_deactivate_entity(entity, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) put_async_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bfq_put_async_queues(bfqd, bfqg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) spin_unlock_irqrestore(&bfqd->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * @blkg is going offline and will be ignored by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * that they don't get lost. If IOs complete after this point, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * stats for them will be lost. Oh well...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) bfqg_stats_xfer_dead(bfqg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
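
#if 0
/*
 * Illustrative sketch, excluded from the build, of what the stats
 * transfer above amounts to for a single counter (types and names
 * are hypothetical): the dying group's total is folded into the
 * parent's auxiliary counter, so recursive sums taken later still
 * account for I/O done by groups that no longer exist. The dead
 * group itself is about to be freed, so its counters need no
 * further care.
 */
#include <stdint.h>

struct counter {
	uint64_t cnt;		/* live value                      */
	uint64_t aux_cnt;	/* inherited from dead descendants */
};

static void xfer_dead(struct counter *parent, struct counter *dead)
{
	parent->aux_cnt += dead->cnt + dead->aux_cnt;
}
#endif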
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) void bfq_end_wr_async(struct bfq_data *bfqd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct blkcg_gq *blkg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct bfq_group *bfqg = blkg_to_bfqg(blkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) bfq_end_wr_async_queues(bfqd, bfqg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) bfq_end_wr_async_queues(bfqd, bfqd->root_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) unsigned int val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (bfqgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) val = bfqgd->weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) seq_printf(sf, "%u\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static u64 bfqg_prfill_weight_device(struct seq_file *sf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct blkg_policy_data *pd, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct bfq_group *bfqg = pd_to_bfqg(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (!bfqg->entity.dev_weight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static int bfq_io_show_weight(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) seq_printf(sf, "default %u\n", bfqgd->weight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) &blkcg_policy_bfq, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) weight = dev_weight ?: weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) bfqg->entity.dev_weight = dev_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Setting the prio_changed flag of the entity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * to 1 with new_weight == weight would re-set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * the value of the weight to its ioprio mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Set the flag only if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if ((unsigned short)weight != bfqg->entity.new_weight) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) bfqg->entity.new_weight = (unsigned short)weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * Make sure that the above new value has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * stored in bfqg->entity.new_weight before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * setting the prio_changed flag. In fact,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * this flag may be read asynchronously (in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * critical sections protected by a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * lock than that held here), and finding this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * flag set may cause the execution of the code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * for updating parameters whose value may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * depend also on bfqg->entity.new_weight (in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * __bfq_entity_update_weight_prio).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * This barrier makes sure that the new value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * of bfqg->entity.new_weight is correctly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * seen in that code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) bfqg->entity.prio_changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
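
#if 0
/*
 * Illustrative userspace sketch, excluded from the build: the
 * publication pattern the smp_wmb() above relies on. The writer
 * stores the payload first and only then sets the flag; a reader
 * that observes the flag set, and orders its loads accordingly (as
 * the consumer in __bfq_entity_update_weight_prio must), is then
 * guaranteed to see the payload. C11 release/acquire expresses the
 * same ordering. All names below are hypothetical.
 */
#include <stdatomic.h>

static int new_weight;			/* payload */
static atomic_int prio_changed;		/* flag    */

static void publish_weight(int w)
{
	new_weight = w;
	/* release: the payload store cannot pass the flag store */
	atomic_store_explicit(&prio_changed, 1, memory_order_release);
}

static int read_weight_if_changed(void)
{
	/* acquire: pairs with the release store above */
	if (atomic_load_explicit(&prio_changed, memory_order_acquire))
		return new_weight;	/* sees publish_weight()'s value */
	return -1;
}
#endif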
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct cftype *cftype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct blkcg *blkcg = css_to_blkcg(css);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct blkcg_gq *blkg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) spin_lock_irq(&blkcg->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) bfqgd->weight = (unsigned short)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct bfq_group *bfqg = blkg_to_bfqg(blkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (bfqg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) bfq_group_set_weight(bfqg, val, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) spin_unlock_irq(&blkcg->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) char *buf, size_t nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) loff_t off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct blkg_conf_ctx ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct blkcg *blkcg = css_to_blkcg(of_css(of));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct bfq_group *bfqg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (sscanf(ctx.body, "%llu", &v) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* 0 is rejected: "default" must be spelled out to clear the weight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (!v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) } else if (!strcmp(strim(ctx.body), "default")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) v = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) bfqg = blkg_to_bfqg(ctx.blkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) blkg_conf_finish(&ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return ret ?: nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) char *buf, size_t nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) loff_t off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) char *endp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) buf = strim(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* "WEIGHT" or "default WEIGHT" sets the default weight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) v = simple_strtoull(buf, &endp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return ret ?: nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return bfq_io_set_device_weight(of, buf, nbytes, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
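
#if 0
/*
 * Usage sketch (userspace, excluded from the build): how the input
 * forms accepted by the two functions above reach them. The cgroup
 * mount point, group name and device numbers are assumptions for
 * illustration only; the file name follows from the cftype entries
 * below ("io."-prefixed on the v2 hierarchy, "blkio."-prefixed on
 * the legacy one).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/cgroup/grp/io.bfq.weight", O_WRONLY);

	if (fd < 0)
		return 1;

	dprintf(fd, "300\n");		/* bare WEIGHT: set the default */
	dprintf(fd, "default 300\n");	/* same effect, explicit form   */
	dprintf(fd, "8:16 500\n");	/* per-device override          */
	dprintf(fd, "8:16 default\n");	/* drop the per-device override */
	/*
	 * Out-of-range values (0, or above the maximum weight) are
	 * rejected with ERANGE; anything unparsable with EINVAL.
	 */

	close(fd);
	return 0;
}
#endif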
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static int bfqg_print_rwstat(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) &blkcg_policy_bfq, seq_cft(sf)->private, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct blkg_policy_data *pd, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct blkg_rwstat_sample sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return __blkg_prfill_rwstat(sf, pd, &sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) seq_cft(sf)->private, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) #ifdef CONFIG_BFQ_CGROUP_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static int bfqg_print_stat(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) &blkcg_policy_bfq, seq_cft(sf)->private, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct blkg_policy_data *pd, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct blkcg_gq *blkg = pd_to_blkg(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct blkcg_gq *pos_blkg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct cgroup_subsys_state *pos_css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) u64 sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) lockdep_assert_held(&blkg->q->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct bfq_stat *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (!pos_blkg->online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return __blkg_prfill_u64(sf, pd, sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
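
#if 0
/*
 * Illustrative userspace sketch, excluded from the build: the shape
 * of the aggregation performed above. Every descendant group is
 * visited; groups that never came online are skipped; for the rest,
 * the live counter and the auxiliary counter (carrying the totals
 * transferred from already-dead children, see bfqg_stats_xfer_dead())
 * are both folded in. Types and names are hypothetical.
 */
#include <stdint.h>

struct grp {
	int online;
	uint64_t cnt;		/* live counter                    */
	uint64_t aux_cnt;	/* inherited from dead descendants */
	struct grp *child;	/* first child                     */
	struct grp *sibling;	/* next sibling                    */
};

static uint64_t sum_recursive(const struct grp *g)
{
	uint64_t sum = 0;

	for (; g; g = g->sibling) {
		if (g->online)
			sum += g->cnt + g->aux_cnt;
		sum += sum_recursive(g->child);
	}
	return sum;
}
#endif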
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) seq_cft(sf)->private, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return __blkg_prfill_u64(sf, pd, sum >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
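
#if 0
/*
 * Worked example, excluded from the build: the ">> 9" above turns a
 * byte count into 512-byte sectors (512 == 1 << 9). For instance,
 * 1 MiB of service amounts to (1 << 20) >> 9 == 2048 sectors.
 */
#include <stdint.h>

static uint64_t bytes_to_sectors(uint64_t bytes)
{
	return bytes >> 9;
}
#endif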
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct blkg_policy_data *pd, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct blkg_rwstat_sample tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) offsetof(struct bfq_group, stats.bytes), &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return __blkg_prfill_u64(sf, pd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct blkg_policy_data *pd, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct bfq_group *bfqg = pd_to_bfqg(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) u64 v = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (samples) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) v = div64_u64(v, samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) __blkg_prfill_u64(sf, pd, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
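
#if 0
/*
 * Worked example, excluded from the build: the truncating integer
 * average computed above, guarded against the no-sample case. For
 * instance, a sum of 18 over 4 samples reports 4.
 */
#include <stdint.h>

static uint64_t avg_or_zero(uint64_t sum, uint64_t samples)
{
	return samples ? sum / samples : 0;
}
#endif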
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* print avg_queue_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) #endif /* CONFIG_BFQ_CGROUP_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return blkg_to_bfqg(bfqd->queue->root_blkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct blkcg_policy blkcg_policy_bfq = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) .dfl_cftypes = bfq_blkg_files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) .legacy_cftypes = bfq_blkcg_legacy_files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) .cpd_alloc_fn = bfq_cpd_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) .cpd_init_fn = bfq_cpd_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) .cpd_bind_fn = bfq_cpd_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) .cpd_free_fn = bfq_cpd_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) .pd_alloc_fn = bfq_pd_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) .pd_init_fn = bfq_pd_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) .pd_offline_fn = bfq_pd_offline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .pd_free_fn = bfq_pd_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) .pd_reset_stats_fn = bfq_pd_reset_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct cftype bfq_blkcg_legacy_files[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) .name = "bfq.weight",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .flags = CFTYPE_NOT_ON_ROOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) .seq_show = bfq_io_show_weight_legacy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) .write_u64 = bfq_io_set_weight_legacy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) .name = "bfq.weight_device",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) .flags = CFTYPE_NOT_ON_ROOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) .seq_show = bfq_io_show_weight,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) .write = bfq_io_set_weight,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* statistics, covering only the tasks in the bfqg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .name = "bfq.io_service_bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .private = offsetof(struct bfq_group, stats.bytes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) .seq_show = bfqg_print_rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) .name = "bfq.io_serviced",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .private = offsetof(struct bfq_group, stats.ios),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) .seq_show = bfqg_print_rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) #ifdef CONFIG_BFQ_CGROUP_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) .name = "bfq.time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) .private = offsetof(struct bfq_group, stats.time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) .seq_show = bfqg_print_stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) .name = "bfq.sectors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .seq_show = bfqg_print_stat_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) .name = "bfq.io_service_time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) .private = offsetof(struct bfq_group, stats.service_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) .seq_show = bfqg_print_rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .name = "bfq.io_wait_time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .private = offsetof(struct bfq_group, stats.wait_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .seq_show = bfqg_print_rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .name = "bfq.io_merged",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .private = offsetof(struct bfq_group, stats.merged),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) .seq_show = bfqg_print_rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .name = "bfq.io_queued",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) .private = offsetof(struct bfq_group, stats.queued),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .seq_show = bfqg_print_rwstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #endif /* CONFIG_BFQ_CGROUP_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* the same statistics which cover the bfqg and its descendants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) .name = "bfq.io_service_bytes_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) .private = offsetof(struct bfq_group, stats.bytes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) .seq_show = bfqg_print_rwstat_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .name = "bfq.io_serviced_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .private = offsetof(struct bfq_group, stats.ios),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) .seq_show = bfqg_print_rwstat_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #ifdef CONFIG_BFQ_CGROUP_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .name = "bfq.time_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .private = offsetof(struct bfq_group, stats.time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) .seq_show = bfqg_print_stat_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .name = "bfq.sectors_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) .seq_show = bfqg_print_stat_sectors_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) .name = "bfq.io_service_time_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) .private = offsetof(struct bfq_group, stats.service_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) .seq_show = bfqg_print_rwstat_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .name = "bfq.io_wait_time_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .private = offsetof(struct bfq_group, stats.wait_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) .seq_show = bfqg_print_rwstat_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .name = "bfq.io_merged_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .private = offsetof(struct bfq_group, stats.merged),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) .seq_show = bfqg_print_rwstat_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) .name = "bfq.io_queued_recursive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) .private = offsetof(struct bfq_group, stats.queued),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) .seq_show = bfqg_print_rwstat_recursive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) .name = "bfq.avg_queue_size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) .seq_show = bfqg_print_avg_queue_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) .name = "bfq.group_wait_time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) .private = offsetof(struct bfq_group, stats.group_wait_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) .seq_show = bfqg_print_stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) .name = "bfq.idle_time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .private = offsetof(struct bfq_group, stats.idle_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) .seq_show = bfqg_print_stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) .name = "bfq.empty_time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) .private = offsetof(struct bfq_group, stats.empty_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) .seq_show = bfqg_print_stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .name = "bfq.dequeue",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) .private = offsetof(struct bfq_group, stats.dequeue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) .seq_show = bfqg_print_stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) #endif /* CONFIG_BFQ_CGROUP_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) { } /* terminate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct cftype bfq_blkg_files[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .name = "bfq.weight",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .flags = CFTYPE_NOT_ON_ROOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .seq_show = bfq_io_show_weight,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .write = bfq_io_set_weight,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {} /* terminate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) #else /* CONFIG_BFQ_GROUP_IOSCHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct bfq_group *bfqg) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) entity->weight = entity->new_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) entity->orig_weight = entity->new_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (bfqq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) bfqq->ioprio = bfqq->new_ioprio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) bfqq->ioprio_class = bfqq->new_ioprio_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) entity->sched_data = &bfqg->sched_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) void bfq_end_wr_async(struct bfq_data *bfqd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) bfq_end_wr_async_queues(bfqd, bfqd->root_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return bfqd->root_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return bfqq->bfqd->root_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct bfq_group *bfqg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (!bfqg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return bfqg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) #endif /* CONFIG_BFQ_GROUP_IOSCHED */