Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
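/*
 * Each region's error state is a single bit in io->error_bits (an
 * unsigned long, see set_bit() in dec_count() below), so at most
 * BITS_PER_LONG regions can be tracked per io.
 */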

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

   bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
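
/*
 * Note that the slab-backed mempool and the bioset each reserve at
 * least 'min_ios' objects, so a client can keep making forward
 * progress under memory pressure: the GFP_NOIO allocations in
 * sync_io()/async_io() below can always fall back on the reserves.
 */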

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
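
/*
 * Worked example of the tagging above, assuming BITS_PER_LONG == 64:
 * a 64-byte-aligned io at 0x...f40 has its low six bits clear, so
 * storing region 5 yields bi_private == 0x...f45.  Masking with
 * -64UL (i.e. ~63UL) recovers the pointer, and masking with 63
 * recovers the region number.
 */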

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

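	/* Don't let a failed READ hand back stale, uninitialized data. */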
	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}
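
/*
 * Completion flow: every bio submitted by do_region() drops one
 * reference in endio(), and dispatch_io() drops the initial reference
 * taken in sync_io()/async_io(), so complete_io() runs exactly once,
 * only after dispatch has finished and all bios have completed.
 */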

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
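
/*
 * A dpages iterator is consumed one page at a time; a sketch of the
 * pattern used by do_region() below:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... use up to 'len' bytes of 'page' starting at 'offset' ...
 *	dp->next_page(dp);
 */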

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}
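
/*
 * Note that context_bi.bi_sector is repurposed as scratch space here:
 * bio_get_page() stashes the current bvec's length in it so that
 * bio_next_page() can advance the iterator without recomputing the
 * bvec.  The field never holds an actual sector in this context.
 */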

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use the bvec iterator to retrieve pages, so it is OK
	 * to access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}
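
/*
 * The vm_* and km_* variants differ only in the virtual-to-page
 * lookup: vmalloc'ed buffers are virtually contiguous but physically
 * scattered, so vm_get_page() must translate each page with
 * vmalloc_to_page(), whereas km_get_page() can use virt_to_page()
 * because directly mapped kernel addresses map linearly to pages.
 */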

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		case REQ_OP_WRITE_SAME:
			num_bvecs = 1;
			break;
		default:
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio_set_dev(bio, where->bdev);
		bio->bi_end_io = endio;
		bio_set_op_attrs(bio, op, op_flags);
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}
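
/*
 * Example of the splitting above: a 4 MiB write (8192 sectors) with
 * 4 KiB pages is capped at BIO_MAX_PAGES (256 in this kernel) pages,
 * i.e. 1 MiB per bio, so do_region() issues four bios.  An iteration
 * also stops early if bio_add_page() fails, leaving the remainder to
 * the next pass of the outer loop.
 */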

static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), consider setting
 * REQ_SYNC in io_req->bi_op_flags so the block layer treats the IO as
 * synchronous; otherwise its completion may be delayed by IO
 * scheduling.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
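
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * synchronously read 8 sectors from 'bdev' into a kmalloc'ed buffer
 * 'buf' of at least 8 << SECTOR_SHIFT bytes.  Leaving notify.fn NULL
 * selects the synchronous path above; error checking is elided.
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *	struct dm_io_region region = {
 *		.bdev = bdev, .sector = 0, .count = 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_op = REQ_OP_READ,
 *		.bi_op_flags = 0,
 *		.mem = { .type = DM_IO_KMEM, .ptr.addr = buf },
 *		.notify = { .fn = NULL },
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &region, &error_bits);
 *	...
 *	dm_io_client_destroy(client);
 */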

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}