Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
	unsigned completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

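/*
 * Number of reserved request-based IOs: the reserved_rq_based_ios module
 * parameter, with RESERVED_REQUEST_BASED_IOS as the default and
 * DM_RESERVED_MAX_IOS as the cap (see __dm_get_module_param()).
 */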
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

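/* A mapped device is request-based iff its request_queue is managed by blk-mq. */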
int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

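/*
 * The tio lives in the request's blk-mq per-request payload (PDU);
 * cmd_size is sized accordingly in dm_mq_init_request_queue().
 */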
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

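/*
 * Account the finished request in dm-stats, if stats are enabled.
 * tio->duration_jiffies holds the start time (set in dm_start_request())
 * and is converted to an elapsed time here.
 */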
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

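/*
 * Put the request back on the requeue list without kicking the list
 * immediately, then kick it after 'msecs'.
 */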
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

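/*
 * Completion of a mapped clone: let the target's rq_end_io() hook (if any)
 * decide how the original request is finished, and disable discard,
 * write-same or write-zeroes on the mapped device when the clone failed
 * with BLK_STS_TARGET because the underlying queue advertises no capacity
 * for that operation.
 */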
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
			 !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

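/*
 * Insert the prepared clone into the underlying device's queue.  Hard
 * failures (anything other than BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE)
 * complete the original request with the error; resource failures are
 * returned so the caller can requeue.
 */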
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}

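/*
 * blk_rq_prep_clone() bio constructor: called for every cloned bio so that
 * end_clone_bio() can find the original bio and the owning tio for
 * partial-completion accounting.
 */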
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

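/*
 * Clone the original request's bios into 'clone' using the md's bioset and
 * wire up the completion hooks.
 */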
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

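/*
 * Reset the per-request tio before the original request is mapped;
 * tio->info is preserved when the target passes per-io data through
 * info.ptr (init_tio_pdu).
 */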
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

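/*
 * Start accounting for the original request: mark it started in blk-mq,
 * record dm-stats start information, and take an md reference that is
 * dropped again in rq_completed().
 */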
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count taken by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

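/*
 * .init_request: runs once for each preallocated request when the tag set
 * is created.  Stash the mapped_device in the tio and, if the immutable
 * target uses per-io data, point info.ptr at the area following the tio.
 */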
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

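/*
 * .queue_rq: resolve the target for this device, start the request and map
 * it onto a clone aimed at the underlying device.  Returning
 * BLK_STS_RESOURCE makes blk-mq retry later when the target is busy or the
 * mapping has to be requeued.
 */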
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

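/*
 * Allocate and register the blk-mq tag set for a request-based mapped
 * device and attach it to the pre-allocated md->queue.
 */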
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}

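/* Tear down the tag set allocated by dm_mq_init_request_queue(). */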
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");