Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

Source: mq-deadline I/O scheduler from this tree (git blame: commit 8f3ce5b39, kx, 2023-10-28):
// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "mq-deadline-cgroup.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int aging_expire = 10 * HZ;
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
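
/*
 * These compile-time defaults are only starting values; on a running system
 * they can usually be tuned through the scheduler's sysfs attributes (names
 * and availability may vary between kernel versions), for example:
 *
 *	echo 200  > /sys/block/<dev>/queue/iosched/read_expire    (milliseconds)
 *	echo 3000 > /sys/block/<dev>/queue/iosched/write_expire   (milliseconds)
 *	echo 32   > /sys/block/<dev>/queue/iosched/fifo_batch
 *
 * The expiry attributes are expressed in milliseconds and converted to
 * jiffies internally.
 */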

enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
	struct io_stats_per_prio stats[DD_PRIO_COUNT];
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in sector-sorted order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
};

struct deadline_data {
	/*
	 * run time data
	 */

	/* Request queue that owns this data structure. */
	struct request_queue *queue;

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	struct io_stats __percpu *stats;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {				\
	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	local_inc(&io_stats->stats[(prio)].event_type);			\
	put_cpu_ptr(io_stats);						\
} while (0)

/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({					\
	unsigned int cpu;						\
	u32 sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	for_each_present_cpu(cpu)					\
		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
				  stats[(prio)].event_type);		\
	sum;								\
})
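
/*
 * For example, the number of requests a priority level still owns can be
 * derived from these counters as
 *
 *	dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio)
 *
 * which is what dd_queued() below computes. Since the per-CPU counters are
 * summed without any synchronization, the result is only approximate while
 * I/O is in flight.
 */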

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_blkcg *blkcg = next->elv.priv[0];

	dd_count(dd, merged, prio);
	ddcg_count(blkcg, merged, ioprio_class);

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}
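
/*
 * Example with the defaults above: a read request inserted at time T has
 * rq->fifo_time = T + HZ / 2 (see dd_insert_request()), so roughly half a
 * second later deadline_check_fifo() starts reporting it as expired.
 * time_after_eq() keeps the comparison correct across jiffies wrap-around.
 */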

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}
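
/*
 * deadline_fifo_request() and deadline_next_request() differ only in the
 * order in which they consider candidates: arrival (FIFO) order versus
 * ascending sector order. On zoned block devices both skip write requests
 * whose target zone is currently write-locked, so either may return NULL
 * even though write requests are queued.
 */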

/*
 * __dd_dispatch_request selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start_ns.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     u64 latest_start_ns)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	struct dd_blkcg *blkcg;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (rq->start_time_ns > latest_start_ns)
			return NULL;
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (rq->start_time_ns > latest_start_ns)
		return NULL;
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, dispatched, prio);
	blkcg = rq->elv.priv[0];
	ddcg_count(blkcg, dispatched, ioprio_class);
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
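
/*
 * Summary of the selection order implemented above:
 *  1. a request parked on the per-priority dispatch list, if any;
 *  2. the next request of the current batch, while dd->batching has not yet
 *     reached fifo_batch;
 *  3. otherwise a new batch is started: reads are preferred unless writes
 *     have already been starved writes_starved times, and within the chosen
 *     direction the FIFO head is used when a deadline has expired (or no
 *     sector-ordered successor exists), the sector-ordered successor
 *     otherwise.
 * A request whose start_time_ns is later than latest_start_ns is never
 * returned.
 */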

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const u64 now_ns = ktime_get_ns();
	struct request *rq = NULL;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	/*
	 * Start with dispatching requests whose deadline expired more than
	 * aging_expire jiffies ago.
	 */
	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
					   jiffies_to_nsecs(dd->aging_expire));
		if (rq)
			goto unlock;
	}
	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}
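
/*
 * With the default aging_expire of 10 * HZ, the first pass above only
 * considers requests that started more than ten seconds ago
 * (now_ns - jiffies_to_nsecs(dd->aging_expire)), so aged best-effort and
 * idle requests still make progress while higher-priority requests keep
 * arriving. The second pass then serves the priority levels strictly in
 * order and stops at the first level that returned a request or still has
 * requests queued.
 */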

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
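
/*
 * Example: if q->nr_requests is 64, dd->async_depth becomes
 * max(1, 3 * 64 / 4) = 48, i.e. asynchronous requests and writes may occupy
 * at most 48 of the 64 scheduler tags, while synchronous reads (never
 * throttled by dd_limit_depth()) can always allocate from the full set.
 */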

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	dd_deactivate_policy(dd->queue);

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
	}

	free_percpu(dd->stats);

	kfree(dd);
}

/*
 * Initialize elevator private data (deadline_data) and associate with blkcg.
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	/*
	 * Initialization would be very tricky if the queue is not frozen,
	 * hence the warning statement below.
	 */
	WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
				     GFP_KERNEL | __GFP_ZERO);
	if (!dd->stats)
		goto free_dd;

	dd->queue = q;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->aging_expire = aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	ret = dd_activate_policy(q);
	if (ret)
		goto free_stats;

	ret = 0;
	q->elevator = eq;
	return 0;

free_stats:
	free_percpu(dd->stats);

free_dd:
	kfree(dd);

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}
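
/*
 * A front merge looks up, in the sector-sorted tree, a request that starts
 * exactly where the new bio ends (bio_end_sector()), so that the bio can be
 * prepended to that request.
 */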

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	struct dd_blkcg *blkcg;

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	/*
	 * If a block cgroup has been associated with the submitter and if an
	 * I/O priority has been set in the associated block cgroup, use the
	 * lowest of the cgroup priority and the request priority for the
	 * request. If no priority has been set in the request, use the cgroup
	 * priority.
	 */
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, inserted, prio);
	blkcg = dd_blkcg_from_bio(rq->bio);
	ddcg_count(blkcg, inserted, ioprio_class);
	rq->elv.priv[0] = blkcg;

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	blk_mq_sched_request_inserted(rq);

	per_prio = &dd->per_prio[prio];
	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}
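
/*
 * Requests inserted at the head bypass the sort and FIFO lists entirely and
 * are placed on the per-priority dispatch list, which __dd_dispatch_request()
 * drains before anything else.
 */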
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770)  * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			       struct list_head *list, bool at_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	struct request_queue *q = hctx->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	struct deadline_data *dd = q->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	spin_lock(&dd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	while (!list_empty(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		rq = list_first_entry(list, struct request, queuelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		list_del_init(&rq->queuelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		dd_insert_request(hctx, rq, at_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	spin_unlock(&dd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) /* Callback from inside blk_mq_rq_ctx_init(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) static void dd_prepare_request(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	rq->elv.priv[0] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) }
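/*
 * Illustrative note: dd_insert_request() later points rq->elv.priv[0] at the
 * submitter's dd_blkcg. Clearing it here presumably guarantees that
 * dd_finish_request() sees a NULL blkcg for requests that are freed without
 * ever having been inserted into the scheduler, so that cgroup statistics
 * are only updated for requests this scheduler actually tracked.
 */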
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  * Callback from inside blk_mq_free_request().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * For zoned block devices, write unlock the target zone of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  * completed write requests. Do this while holding the zone lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  * spinlock so that the zone is never unlocked while deadline_fifo_request()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  * or deadline_next_request() is executing. This function is called for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  * all requests, whether or not these requests complete successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  * For a zoned block device, __dd_dispatch_request() may have stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * dispatching requests if all the queued requests are write requests directed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  * at zones that are already locked due to on-going write requests. To ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  * write request dispatch progress in this case, mark the queue as needing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808)  * restart to ensure that the queue is run again once the request has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * completed and its target zone has been unlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) static void dd_finish_request(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	struct request_queue *q = rq->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	struct deadline_data *dd = q->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct dd_blkcg *blkcg = rq->elv.priv[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	const u8 ioprio_class = dd_rq_ioclass(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	dd_count(dd, completed, prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	ddcg_count(blkcg, completed, ioprio_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (blk_queue_is_zoned(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		spin_lock_irqsave(&dd->zone_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		blk_req_zone_write_unlock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		spin_unlock_irqrestore(&dd->zone_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
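/*
 * Illustrative sketch of the zoned-device flow described above: dispatching
 * a write write-locks its target zone, so later writes to the same zone are
 * held back and dispatch can stall when every queued write targets a locked
 * zone. On completion, dd_finish_request() releases the zone lock under
 * dd->zone_lock and, if writes are still queued at this priority, marks the
 * hctx for restart so that dispatch is retried now that the zone is free.
 */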
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	return !list_empty_careful(&per_prio->dispatch) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	enum dd_prio prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		if (dd_has_work_for_prio(&dd->per_prio[prio]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  * sysfs parts below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) #define SHOW_INT(__FUNC, __VAR)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	struct deadline_data *dd = e->elevator_data;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	return sysfs_emit(page, "%d\n", __VAR);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) SHOW_INT(deadline_front_merges_show, dd->front_merges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) SHOW_INT(deadline_async_depth_show, dd->async_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) #undef SHOW_INT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) #undef SHOW_JIFFIES
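/*
 * Illustrative expansion of the macros above:
 * SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]) becomes
 * roughly
 *
 *	static ssize_t deadline_read_expire_show(struct elevator_queue *e,
 *						 char *page)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *
 *		return sysfs_emit(page, "%d\n",
 *				  jiffies_to_msecs(dd->fifo_expire[DD_READ]));
 *	}
 *
 * i.e. the sysfs files report the expiry times in milliseconds even though
 * deadline_data stores them in jiffies.
 */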
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct deadline_data *dd = e->elevator_data;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	int __data, __ret;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	__ret = kstrtoint(page, 0, &__data);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (__ret < 0)							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		return __ret;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (__data < (MIN))						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		__data = (MIN);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	else if (__data > (MAX))					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		__data = (MAX);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	*(__PTR) = __CONV(__data);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	return count;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) #define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) #undef STORE_FUNCTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) #undef STORE_INT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) #undef STORE_JIFFIES
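/*
 * Illustrative behaviour of the store macros above: the generated functions
 * parse the written string with kstrtoint(), clamp the value to [MIN, MAX]
 * and apply the conversion, so writing "3000" to write_expire stores
 * msecs_to_jiffies(3000) in dd->fifo_expire[DD_WRITE] (a 3 second deadline),
 * while non-numeric input makes kstrtoint() fail and is rejected.
 */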
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) #define DD_ATTR(name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static struct elv_fs_entry deadline_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	DD_ATTR(read_expire),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	DD_ATTR(write_expire),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	DD_ATTR(writes_starved),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	DD_ATTR(front_merges),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	DD_ATTR(async_depth),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	DD_ATTR(fifo_batch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	DD_ATTR(aging_expire),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	__ATTR_NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) };
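/*
 * Usage sketch ("sda" is only an example device name): once mq-deadline is
 * the active scheduler, these attributes appear under
 * /sys/block/<disk>/queue/iosched/, e.g.
 *
 *	echo mq-deadline > /sys/block/sda/queue/scheduler
 *	cat /sys/block/sda/queue/iosched/read_expire	# in ms, 500 by default
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 *
 * See Documentation/block/deadline-iosched.rst for what each tunable means.
 */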
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) #ifdef CONFIG_BLK_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) static void *deadline_##name##_fifo_start(struct seq_file *m,		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 					  loff_t *pos)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	__acquires(&dd->lock)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	struct request_queue *q = m->private;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	struct deadline_data *dd = q->elevator->elevator_data;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	spin_lock(&dd->lock);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) }									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 					 loff_t *pos)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct request_queue *q = m->private;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	struct deadline_data *dd = q->elevator->elevator_data;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) }									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	__releases(&dd->lock)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	struct request_queue *q = m->private;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	struct deadline_data *dd = q->elevator->elevator_data;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	spin_unlock(&dd->lock);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	.start	= deadline_##name##_fifo_start,				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	.next	= deadline_##name##_fifo_next,				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	.stop	= deadline_##name##_fifo_stop,				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	.show	= blk_mq_debugfs_rq_show,				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) };									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) static int deadline_##name##_next_rq_show(void *data,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					  struct seq_file *m)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	struct request_queue *q = data;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	struct deadline_data *dd = q->elevator->elevator_data;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	struct request *rq = per_prio->next_rq[data_dir];		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (rq)								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		__blk_mq_debugfs_rq_show(m, rq);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	return 0;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) #undef DEADLINE_DEBUGFS_DDIR_ATTRS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) static int deadline_batching_show(void *data, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct request_queue *q = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	struct deadline_data *dd = q->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	seq_printf(m, "%u\n", dd->batching);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) static int deadline_starved_show(void *data, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	struct request_queue *q = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	struct deadline_data *dd = q->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	seq_printf(m, "%u\n", dd->starved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static int dd_async_depth_show(void *data, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct request_queue *q = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct deadline_data *dd = q->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	seq_printf(m, "%u\n", dd->async_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static int dd_queued_show(void *data, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	struct request_queue *q = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	struct deadline_data *dd = q->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		   dd_queued(dd, DD_BE_PRIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		   dd_queued(dd, DD_IDLE_PRIO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* Number of requests owned by the block driver for a given priority. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		- dd_sum(dd, completed, prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
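/*
 * Worked example: with 100 requests dispatched, 5 merged and 90 completed at
 * a given priority, dd_owned_by_driver() returns 15. The merged count is
 * presumably added because a request merged into another one is freed (and
 * counted as completed) without ever having been dispatched.
 */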
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static int dd_owned_by_driver_show(void *data, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	struct request_queue *q = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	struct deadline_data *dd = q->elevator->elevator_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		   dd_owned_by_driver(dd, DD_BE_PRIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) #define DEADLINE_DISPATCH_ATTR(prio)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 					     loff_t *pos)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	__acquires(&dd->lock)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct request_queue *q = m->private;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	struct deadline_data *dd = q->elevator->elevator_data;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	spin_lock(&dd->lock);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	return seq_list_start(&per_prio->dispatch, *pos);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 					    void *v, loff_t *pos)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	struct request_queue *q = m->private;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	struct deadline_data *dd = q->elevator->elevator_data;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	return seq_list_next(v, &per_prio->dispatch, pos);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	__releases(&dd->lock)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	struct request_queue *q = m->private;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct deadline_data *dd = q->elevator->elevator_data;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	spin_unlock(&dd->lock);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	.start	= deadline_dispatch##prio##_start,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	.next	= deadline_dispatch##prio##_next,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	.stop	= deadline_dispatch##prio##_stop,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	.show	= blk_mq_debugfs_rq_show,				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) DEADLINE_DISPATCH_ATTR(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) DEADLINE_DISPATCH_ATTR(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) DEADLINE_DISPATCH_ATTR(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) #undef DEADLINE_DISPATCH_ATTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) #define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	{#name "_fifo_list", 0400,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			.seq_ops = &deadline_##name##_fifo_seq_ops}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #define DEADLINE_NEXT_RQ_ATTR(name)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	DEADLINE_QUEUE_DDIR_ATTRS(read0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	DEADLINE_QUEUE_DDIR_ATTRS(write0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	DEADLINE_QUEUE_DDIR_ATTRS(read1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	DEADLINE_QUEUE_DDIR_ATTRS(write1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	DEADLINE_QUEUE_DDIR_ATTRS(read2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	DEADLINE_QUEUE_DDIR_ATTRS(write2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	DEADLINE_NEXT_RQ_ATTR(read0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	DEADLINE_NEXT_RQ_ATTR(write0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	DEADLINE_NEXT_RQ_ATTR(read1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	DEADLINE_NEXT_RQ_ATTR(write1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	DEADLINE_NEXT_RQ_ATTR(read2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	DEADLINE_NEXT_RQ_ATTR(write2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	{"batching", 0400, deadline_batching_show},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	{"starved", 0400, deadline_starved_show},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	{"async_depth", 0400, dd_async_depth_show},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	{"owned_by_driver", 0400, dd_owned_by_driver_show},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	{"queued", 0400, dd_queued_show},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) };
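/*
 * Illustrative note: with CONFIG_BLK_DEBUG_FS enabled, these entries are
 * typically exposed under /sys/kernel/debug/block/<disk>/sched/
 * (read0_fifo_list, dispatch1, queued, ...), which helps when checking which
 * per-priority queue a request ended up on.
 */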
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) #undef DEADLINE_QUEUE_DDIR_ATTRS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static struct elevator_type mq_deadline = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		.depth_updated		= dd_depth_updated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		.limit_depth		= dd_limit_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		.insert_requests	= dd_insert_requests,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		.dispatch_request	= dd_dispatch_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		.prepare_request	= dd_prepare_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		.finish_request		= dd_finish_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		.next_request		= elv_rb_latter_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		.former_request		= elv_rb_former_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		.bio_merge		= dd_bio_merge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		.request_merge		= dd_request_merge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		.requests_merged	= dd_merged_requests,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		.request_merged		= dd_request_merged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		.has_work		= dd_has_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		.init_sched		= dd_init_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		.exit_sched		= dd_exit_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		.init_hctx		= dd_init_hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) #ifdef CONFIG_BLK_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	.elevator_attrs = deadline_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	.elevator_name = "mq-deadline",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	.elevator_alias = "deadline",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	.elevator_owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) MODULE_ALIAS("mq-deadline-iosched");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static int __init deadline_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	ret = elv_register(&mq_deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	ret = dd_blkcg_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		goto unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) unreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	elv_unregister(&mq_deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static void __exit deadline_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	dd_blkcg_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	elv_unregister(&mq_deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) module_init(deadline_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) module_exit(deadline_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) MODULE_DESCRIPTION("MQ deadline IO scheduler");