/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

/* Identifies which rq_qos policy an instance implements. */
enum rq_qos_id {
	RQ_QOS_WBT,		/* writeback throttling (blk-wbt) */
	RQ_QOS_LATENCY,		/* I/O latency control (blk-iolatency) */
	RQ_QOS_COST,		/* cost-based control (blk-iocost) */
	RQ_QOS_IOPRIO,		/* I/O priority (blk-ioprio) */
};

/* A wait queue paired with the inflight count it throttles on. */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

/* One node in a request_queue's singly linked list of qos policies. */
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

/*
 * Per-policy hooks invoked from the block layer; any of them may be
 * left NULL if the policy does not care about that event.
 */
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
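
/*
 * Illustrative sketch (not part of this interface): a policy typically
 * embeds struct rq_qos in its own state and fills in only the hooks it
 * needs. All names below are hypothetical.
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *	};
 *
 *	static void my_qos_exit(struct rq_qos *rqos)
 *	{
 *		struct my_qos *mq = container_of(rqos, struct my_qos, rqos);
 *
 *		kfree(mq);
 *	}
 *
 *	static struct rq_qos_ops my_qos_ops = {
 *		.exit = my_qos_exit,
 *	};
 */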

/* Depth scaling state shared by policies that adjust queue depth. */
struct rq_depth {
	unsigned int max_depth;		/* current allowed depth */

	int scale_step;			/* current throttle scaling step */
	bool scaled_max;		/* hit the maximum while scaling up */

	unsigned int queue_depth;	/* device queue depth */
	unsigned int default_depth;
};

/*
 * Walk the queue's rq_qos list and return the instance matching @id,
 * or NULL if none is attached.
 */
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * Freeze the queue so that no IO is in flight while rqos is being
	 * added; this is fine since rq_qos is only supported on blk-mq
	 * queues.
	 *
	 * Reuse ->queue_lock to protect against other concurrent rq_qos
	 * additions and deletions.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}
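
/*
 * Continuing the hypothetical policy sketched above, attaching it to a
 * queue would look roughly like this (error handling trimmed):
 *
 *	static int my_qos_init(struct request_queue *q)
 *	{
 *		struct my_qos *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 *
 *		if (!mq)
 *			return -ENOMEM;
 *		mq->rqos.id = RQ_QOS_WBT;
 *		mq->rqos.ops = &my_qos_ops;
 *		mq->rqos.q = q;
 *		rq_qos_add(q, &mq->rqos);
 *		return 0;
 *	}
 *
 * The id must be one of enum rq_qos_id and unique on the queue; a real
 * policy would use the slot it owns rather than reusing RQ_QOS_WBT.
 */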

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	blk_mq_debugfs_unregister_rqos(rqos);
}

/*
 * Callbacks used by rq_qos_wait(): acquire_inflight_cb attempts to take
 * an inflight slot and returns true on success; cleanup_cb releases a
 * slot that was handed to the waiter but is no longer needed.
 */
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
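
/*
 * rq_qos_wait() sleeps on rqw->wait until acquire_inflight_cb succeeds.
 * A minimal callback, modelled loosely on what blk-wbt does, would just
 * bump the inflight counter while it is below a policy-chosen limit
 * (my_limit() is hypothetical):
 *
 *	static bool my_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, my_limit(private_data));
 *	}
 *
 * The completion side then decrements rqw->inflight and wakes rqw->wait.
 */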

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}
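
/*
 * Rough hook order for a tracked bio, assuming a policy implements all
 * hooks: rq_qos_throttle() at submission, then rq_qos_track() once a
 * request is allocated (or rq_qos_merge() if the bio merges into an
 * existing request), rq_qos_issue() when the request is dispatched,
 * rq_qos_requeue() if it is requeued, and finally rq_qos_done() and
 * rq_qos_done_bio() on completion.
 */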

void rq_qos_exit(struct request_queue *);

#endif /* RQ_QOS_H */