Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned"

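/* Minimum number of BIOs in the per-target clone mempool (bio_set) */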
#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_dev		*dev;
	struct dm_zone		*zone;
	struct bio		*bio;
	refcount_t		ref;
};

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	refcount_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		**ddev;
	unsigned int		nr_ddevs;

	unsigned int		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata     *metadata;

	/* For chunk work */
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;
	struct mutex		chunk_lock;

	/* For cloned BIOs to zones */
	struct bio_set		bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};

/*
 * Flush intervals (seconds).
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

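	/*
	 * Keep the first error reported and, on any failure, flag the
	 * backing device so that its availability is re-checked.
	 */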
	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;
	if (bioctx->dev && bio->bi_status != BLK_STS_OK)
		bioctx->dev->flags |= DMZ_CHECK_BDEV;

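	/* The last reference dropped completes the original target BIO */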
	if (refcount_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_dev *dev = zone->dev;
	struct bio *clone;

	if (dev->flags & DMZ_BDEV_DYING)
		return -EIO;

	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

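	/*
	 * Redirect the clone to the backing device, at the zone start sector
	 * plus the offset of chunk_block within the zone.
	 */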
	bio_set_dev(clone, dev->bdev);
	bioctx->dev = dev;
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

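	/* Hold a context reference for the in-flight clone before submitting */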
	refcount_inc(&bioctx->ref);
	submit_bio_noacct(clone);

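	/* Writes to a sequential zone advance its write pointer */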
	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}

/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/* Clear nr_blocks */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* Reads into unmapped chunks only need to zero the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" :
		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
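	/*
	 * Walk the BIO block range: read each run of blocks from the data
	 * zone or from its buffer zone, wherever the blocks are valid, and
	 * zero the buffer for blocks that are valid in neither.
	 */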
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
		    chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(zmd, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(zmd, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks,
					  end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio,
					     chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid block: zero out the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (IS_ERR(bzone))
		return PTR_ERR(bzone);

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" :
		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
	    chunk_block == zone->wp_block) {
		/*
		 * zone is a random zone or it is a sequential zone
		 * and the BIO is aligned to the zone write pointer:
		 * direct write the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio,
					       chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(zmd, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		dmz_metadata_label(dmz->metadata),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
	    chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	dmz_lock_metadata(zmd);

	/*
	 * Get the data zone mapping the chunk. There may be no
	 * mapping for read and discard. If a mapping is obtained,
	 * the zone returned will be set to active state.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Process the BIO */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
		dmz_reclaim_bio_acc(zone->dev->reclaim);
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		DMERR("(%s): Unsupported BIO operation 0x%x",
		      dmz_metadata_label(dmz->metadata), bio_op(bio));
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This will check that the mapping
	 * is still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
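		/*
		 * Drop the chunk lock while handling the BIO: the handler
		 * takes the metadata lock and may block on metadata I/O.
		 */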
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Queueing the work incremented the work refcount */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);
	if (ret)
		DMDEBUG("(%s): Metadata flush failed, rc=%d",
			dmz_metadata_label(dmz->metadata), ret);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

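	/* Re-arm the periodic metadata flush */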
	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
	struct dm_chunk_work *cw;
	int ret = 0;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (cw) {
		dmz_get_chunk_work(cw);
	} else {
		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (unlikely(!cw)) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_WORK(&cw->work, dmz_chunk_work);
		refcount_set(&cw->refcount, 1);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);

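	/* Take a work reference only if the work was actually queued */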
	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
	return ret;
}

/*
 * Check if the backing device is being removed. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
	if (dmz_dev->flags & DMZ_BDEV_DYING)
		return true;

	if (dmz_dev->flags & DMZ_CHECK_BDEV)
		return !dmz_check_bdev(dmz_dev);

	if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
		dmz_dev_warn(dmz_dev, "Backing device queue dying");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return dmz_dev->flags & DMZ_BDEV_DYING;
}

/*
 * Check the backing device availability. This detects such events as
 * backing device going offline due to errors, media removals, etc.
 * This check is less efficient than dmz_bdev_is_dying() and should
 * only be performed as a part of error handling.
 */
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
{
	struct gendisk *disk;

	dmz_dev->flags &= ~DMZ_CHECK_BDEV;

	if (dmz_bdev_is_dying(dmz_dev))
		return false;

	disk = dmz_dev->bdev->bd_disk;
	if (disk->fops->check_events &&
	    disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
		dmz_dev_warn(dmz_dev, "Backing device offline");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return !(dmz_dev->flags & DMZ_BDEV_DYING);
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_metadata *zmd = dmz->metadata;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return DM_MAPIO_KILL;

	DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		bio_op(bio), (unsigned long long)sector, nr_sectors,
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
		(unsigned int)dmz_bio_blocks(bio));

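	/* Zero-length BIOs that are not flush requests are simply remapped */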
	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->dev = NULL;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	refcount_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
	if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
		dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);

	/* Now ready to handle this BIO */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret) {
		DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
			dmz_metadata_label(zmd),
			bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
			ret);
		return DM_MAPIO_REQUEUE;
	}

	return DM_MAPIO_SUBMITTED;
}

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path,
				int idx, int nr_devs)
{
	struct dmz_target *dmz = ti->private;
	struct dm_dev *ddev;
	struct dmz_dev *dev;
	int ret;
	struct block_device *bdev;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
	if (ret) {
		ti->error = "Get target device failed";
		return ret;
	}

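	/*
	 * A regular (non-zoned) block device is only accepted as the first
	 * device of a multi-device setup; all other devices must be zoned.
	 */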
	bdev = ddev->bdev;
	if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
		if (nr_devs == 1) {
			ti->error = "Invalid regular device";
			goto err;
		}
		if (idx != 0) {
			ti->error = "First device must be a regular device";
			goto err;
		}
		if (dmz->ddev[0]) {
			ti->error = "Too many regular devices";
			goto err;
		}
		dev = &dmz->dev[idx];
		dev->flags = DMZ_BDEV_REGULAR;
	} else {
		if (dmz->ddev[idx]) {
			ti->error = "Too many zoned devices";
			goto err;
		}
		if (nr_devs > 1 && idx == 0) {
			ti->error = "First device must be a regular device";
			goto err;
		}
		dev = &dmz->dev[idx];
	}
	dev->bdev = bdev;
	dev->dev_idx = idx;
	(void)bdevname(dev->bdev, dev->name);

	dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	if (ti->begin) {
		ti->error = "Partial mapping is not supported";
		goto err;
	}

	dmz->ddev[idx] = ddev;

	return 0;
err:
	dm_put_device(ti, ddev);
	return -EINVAL;
}

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	for (i = 0; i < dmz->nr_ddevs; i++) {
		if (dmz->ddev[i]) {
			dm_put_device(ti, dmz->ddev[i]);
			dmz->ddev[i] = NULL;
		}
	}
}

static int dmz_fixup_devices(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *reg_dev, *zoned_dev;
	struct request_queue *q;
	sector_t zone_nr_sectors = 0;
	int i;

	/*
	 * When we have more than one device, the first one must be a
	 * regular block device and the others zoned block devices.
	 */
	if (dmz->nr_ddevs > 1) {
		reg_dev = &dmz->dev[0];
		if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
			ti->error = "Primary disk is not a regular device";
			return -EINVAL;
		}
		for (i = 1; i < dmz->nr_ddevs; i++) {
			zoned_dev = &dmz->dev[i];
			if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
				ti->error = "Secondary disk is not a zoned device";
				return -EINVAL;
			}
			q = bdev_get_queue(zoned_dev->bdev);
			if (zone_nr_sectors &&
			    zone_nr_sectors != blk_queue_zone_sectors(q)) {
				ti->error = "Zone nr sectors mismatch";
				return -EINVAL;
			}
			zone_nr_sectors = blk_queue_zone_sectors(q);
			zoned_dev->zone_nr_sectors = zone_nr_sectors;
			zoned_dev->nr_zones =
				blkdev_nr_zones(zoned_dev->bdev->bd_disk);
		}
	} else {
		reg_dev = NULL;
		zoned_dev = &dmz->dev[0];
		if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
			ti->error = "Disk is not a zoned device";
			return -EINVAL;
		}
		q = bdev_get_queue(zoned_dev->bdev);
		zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
		zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
	}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	if (reg_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		sector_t zone_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		reg_dev->zone_nr_sectors = zone_nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		reg_dev->nr_zones =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 					      reg_dev->zone_nr_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		reg_dev->zone_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		zone_offset = reg_dev->nr_zones;
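		/*
		 * Stack the zoned devices after the regular device in the
		 * global zone numbering. Illustrative example: a 16-zone
		 * cache device followed by two zoned devices of 1000 zones
		 * each yields zone_offset values 0, 16 and 1016.
		 */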
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		for (i = 1; i < dmz->nr_ddevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			dmz->dev[i].zone_offset = zone_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			zone_offset += dmz->dev[i].nr_zones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * Setup target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  */
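/*
 * Illustrative table line (device names and length are hypothetical, not
 * taken from this file): the target takes one or more device paths, e.g.
 * "0 <nr_sectors> zoned /dev/nvme0n1 /dev/sdb" for a regular cache device
 * followed by one zoned device.
 */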
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	struct dmz_target *dmz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	/* Check arguments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	if (argc < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		ti->error = "Invalid argument count";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	/* Allocate and initialize the target descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (!dmz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		ti->error = "Unable to allocate the zoned target descriptor";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (!dmz->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		ti->error = "Unable to allocate the zoned device descriptors";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		kfree(dmz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (!dmz->ddev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		ti->error = "Unable to allocate the dm device descriptors";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	dmz->nr_ddevs = argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	ti->private = dmz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	/* Get the target zoned block device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	for (i = 0; i < argc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		ret = dmz_get_zoned_device(ti, argv[i], i, argc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			goto err_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	ret = dmz_fixup_devices(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		goto err_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	/* Initialize metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			       dm_table_device_name(ti->table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		ti->error = "Metadata initialization failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		goto err_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	/* Set target (no write same support) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	ti->num_flush_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	ti->num_discard_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	ti->num_write_zeroes_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	ti->per_io_data_size = sizeof(struct dmz_bioctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	ti->flush_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	ti->discards_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	/* The exposed capacity is the number of chunks that can be mapped */
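	/*
	 * Illustrative arithmetic: with 256 MiB zones (a shift of 19 for
	 * 512-byte sectors) and 1000 mappable chunks, ti->len becomes
	 * 1000 << 19 = 524288000 sectors, i.e. 250 GiB.
	 */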
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		dmz_zone_nr_sectors_shift(dmz->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	/* Zone BIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		ti->error = "Create BIO set failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		goto err_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	/* Chunk BIO work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	mutex_init(&dmz->chunk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 					WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 					dmz_metadata_label(dmz->metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	if (!dmz->chunk_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		ti->error = "Create chunk workqueue failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		goto err_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	/* Flush work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	spin_lock_init(&dmz->flush_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	bio_list_init(&dmz->flush_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 						dmz_metadata_label(dmz->metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (!dmz->flush_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		ti->error = "Create flush workqueue failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		goto err_cwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/* Initialize reclaim */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	for (i = 0; i < dmz->nr_ddevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			ti->error = "Zone reclaim initialization failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			goto err_fwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	       dmz_metadata_label(dmz->metadata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	       (unsigned long long)ti->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	       (unsigned long long)dmz_sect2blk(ti->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) err_fwq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	destroy_workqueue(dmz->flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) err_cwq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	destroy_workqueue(dmz->chunk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) err_bio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	mutex_destroy(&dmz->chunk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	bioset_exit(&dmz->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) err_meta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	dmz_dtr_metadata(dmz->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) err_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	dmz_put_zoned_device(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	kfree(dmz->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	kfree(dmz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  * Cleanup target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) static void dmz_dtr(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	flush_workqueue(dmz->chunk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	destroy_workqueue(dmz->chunk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	for (i = 0; i < dmz->nr_ddevs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		dmz_dtr_reclaim(dmz->dev[i].reclaim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	cancel_delayed_work_sync(&dmz->flush_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	destroy_workqueue(dmz->flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	(void) dmz_flush_metadata(dmz->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	dmz_dtr_metadata(dmz->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	bioset_exit(&dmz->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	dmz_put_zoned_device(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	mutex_destroy(&dmz->chunk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	kfree(dmz->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	kfree(dmz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * Setup target request queue limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	limits->logical_block_size = DMZ_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	limits->physical_block_size = DMZ_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	limits->discard_alignment = DMZ_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	limits->discard_granularity = DMZ_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	limits->max_discard_sectors = chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	limits->max_hw_discard_sectors = chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	limits->max_write_zeroes_sectors = chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	/* FS hint to try to align to the device zone size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	limits->chunk_sectors = chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	limits->max_sectors = chunk_sectors;
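	/*
	 * Illustrative numbers: with the common 256 MiB zone size both values
	 * above are 524288 sectors, so no single I/O can span two chunks.
	 */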
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/* We are exposing a drive-managed zoned block device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	limits->zoned = BLK_ZONED_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  * Pass on ioctl to the backend device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	struct dmz_dev *dev = &dmz->dev[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (!dmz_check_bdev(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	*bdev = dev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * Stop work items on suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static void dmz_suspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	flush_workqueue(dmz->chunk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	for (i = 0; i < dmz->nr_ddevs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		dmz_suspend_reclaim(dmz->dev[i].reclaim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	cancel_delayed_work_sync(&dmz->flush_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  * Restart work items on resume or if suspend failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void dmz_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	for (i = 0; i < dmz->nr_ddevs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		dmz_resume_reclaim(dmz->dev[i].reclaim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
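/*
 * Report each backing device to the DM core, with its capacity rounded down
 * to a whole number of zones (zone_nr_sectors is a power of two, so the mask
 * below truncates to a zone boundary).
 */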
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static int dmz_iterate_devices(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			       iterate_devices_callout_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	sector_t capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	int i, r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	for (i = 0; i < dmz->nr_ddevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		r = fn(ti, dmz->ddev[i], 0, capacity, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
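/*
 * Report target status. An illustrative STATUSTYPE_INFO line for a single
 * zoned drive could look like
 * "65536 zones 0/0 cache 128/128 random 3968/65408 sequential"
 * (all counts invented for the example).
 */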
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static void dmz_status(struct dm_target *ti, status_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		       unsigned int status_flags, char *result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		       unsigned int maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	ssize_t sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	char buf[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct dmz_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	case STATUSTYPE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		DMEMIT("%u zones %u/%u cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		       dmz_nr_zones(dmz->metadata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		       dmz_nr_unmap_cache_zones(dmz->metadata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		       dmz_nr_cache_zones(dmz->metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		for (i = 0; i < dmz->nr_ddevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			 * For a multi-device setup the first device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			 * contains only cache zones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			if ((i == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			    (dmz_nr_cache_zones(dmz->metadata) > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			DMEMIT(" %u/%u random %u/%u sequential",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			       dmz_nr_unmap_rnd_zones(dmz->metadata, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			       dmz_nr_rnd_zones(dmz->metadata, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			       dmz_nr_unmap_seq_zones(dmz->metadata, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			       dmz_nr_seq_zones(dmz->metadata, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	case STATUSTYPE_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		dev = &dmz->dev[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		format_dev_t(buf, dev->bdev->bd_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		DMEMIT("%s", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		for (i = 1; i < dmz->nr_ddevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			dev = &dmz->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			format_dev_t(buf, dev->bdev->bd_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			DMEMIT(" %s", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
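/*
 * Handle user messages. "reclaim" is the only message understood; it kicks
 * zone reclaim on every device, e.g. via the (illustrative) command
 * "dmsetup message <dm-device-name> 0 reclaim".
 */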
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		       char *result, unsigned int maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	struct dmz_target *dmz = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (!strcasecmp(argv[0], "reclaim")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		for (i = 0; i < dmz->nr_ddevs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			dmz_schedule_reclaim(dmz->dev[i].reclaim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		DMERR("unrecognized message %s", argv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static struct target_type dmz_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	.name		 = "zoned",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	.version	 = {2, 0, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	.features	 = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	.module		 = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	.ctr		 = dmz_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	.dtr		 = dmz_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	.map		 = dmz_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	.io_hints	 = dmz_io_hints,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	.prepare_ioctl	 = dmz_prepare_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	.postsuspend	 = dmz_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	.resume		 = dmz_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	.iterate_devices = dmz_iterate_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	.status		 = dmz_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	.message	 = dmz_message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static int __init dmz_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	return dm_register_target(&dmz_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static void __exit dmz_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	dm_unregister_target(&dmz_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) module_init(dmz_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) module_exit(dmz_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) MODULE_LICENSE("GPL");