Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
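
/*
 * Illustrative note (not part of the original file): on a 64-bit build with
 * a 64-byte L1 cache line, sizeof(sector_t) is 8, so KEYS_PER_NODE works
 * out to 8 and CHILDREN_PER_NODE to 9; each btree node then fills exactly
 * one cache line.
 */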

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
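
/*
 * Illustrative check (not part of the original file): with base = 9,
 * n = 1000 shrinks as 1000 -> 112 -> 13 -> 2 -> 1, so int_log(1000, 9)
 * returns 4, matching ceil(log9(1000)).
 */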

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
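
/*
 * Layout sketch (illustrative, not part of the original file): the single
 * allocation above is used as
 *
 *	n_highs[0] ... n_highs[num - 1] | n_targets[0] ... n_targets[num - 1]
 *
 * and the memset leaves every n_highs entry at (sector_t)-1, the maximum
 * sector value, until real target boundaries are filled in.
 */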

int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

static void dm_table_destroy_keyslot_manager(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	dm_table_destroy_keyslot_manager(t);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks whether an area of a destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)start,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)len,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
	dev_t dev;
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		dev = name_to_dev_t(path);
	else {
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	unsigned int major, minor;
	char dummy;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		dev = dm_get_dev_t(path);
		if (!dev)
			return -ENODEV;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
			kfree(dd);
			return r;
		}

		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
		goto out;

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	refcount_inc(&dd->count);
out:
	*result = dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);
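
/*
 * Illustrative usage (not part of the original file): a target constructor
 * typically passes through whatever device string it was given in its table
 * line, in either path or major:minor form, e.g.
 *
 *	r = dm_get_device(ti, "/dev/sdb", dm_table_get_mode(ti->table), &dev);
 *	r = dm_get_device(ti, "8:16", FMODE_READ | FMODE_WRITE, &dev);
 *
 * and releases it again with dm_put_device(ti, dev) from its dtr or on
 * error paths.
 */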

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (blk_stack_limits(limits, &q->limits,
			get_start_sect(bdev) + start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);
	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv) {
		memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to strip the backslash escape characters */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
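
/*
 * Example (illustrative, not part of the original file): the params string
 * "/dev/sdc1 1024" splits into argc = 2 with argv = { "/dev/sdc1", "1024" },
 * and a backslash-escaped token such as "a\ b" comes back as the single
 * argument "a b".
 */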

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						 struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry, are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
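
/*
 * Worked example (illustrative, not part of the original file): suppose a
 * table stacks a 512-byte and a 4096-byte logical-block-size device, so the
 * combined logical_block_size is 4096 (8 sectors). If the first target is
 * 1 sector long and the second target sits on the 4 KiB device, then after
 * the first iteration next_target_start = 1 and remaining = 7; in the next
 * iteration 7 & 7 is non-zero, the loop breaks, and the table is rejected
 * with -EINVAL.
 */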

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		tgt->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = tgt->type;
	}

	if (dm_target_has_integrity(tgt->type))
		t->integrity_added = 1;

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
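
/*
 * For reference (illustrative, not part of the original file): a table line
 * loaded with "dmsetup create", e.g.
 *
 *	0 8388608 linear /dev/sdb 0
 *
 * arrives here as start = 0, len = 8388608 (sectors), type = "linear" and
 * params = "/dev/sdb 0"; params is split by dm_split_args() above and
 * passed to the target's ctr.
 */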

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg,
			     struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
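/*
 * Helpers for classifying a table's queue mode: DM_TYPE_BIO_BASED and
 * DM_TYPE_DAX_BIO_BASED tables clone and remap bios, whereas
 * DM_TYPE_REQUEST_BASED tables stack on top of blk-mq request queues.
 */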
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static bool __table_type_bio_based(enum dm_queue_mode table_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	return (table_type == DM_TYPE_BIO_BASED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		table_type == DM_TYPE_DAX_BIO_BASED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) static bool __table_type_request_based(enum dm_queue_mode table_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	return table_type == DM_TYPE_REQUEST_BASED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	t->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) EXPORT_SYMBOL_GPL(dm_table_set_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) /* validate the dax capability of the target device span */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	int blocksize = *(int *) data, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	bool rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	id = dax_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	dax_read_unlock(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) /* Check devices support synchronous DAX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 					      sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
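/*
 * Note the inverted callout convention: @iterate_fn reports devices that are
 * *not* capable (e.g. device_not_dax_capable() above), so a non-zero return
 * from iterate_devices() means some device failed the check and the table as
 * a whole cannot support DAX.
 */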
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) bool dm_table_supports_dax(struct dm_table *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			   iterate_devices_callout_fn iterate_fn, int *blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	/* Ensure that all targets support DAX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		if (!ti->type->direct_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		if (!ti->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
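/*
 * Request-based tables may only stack on whole blk-mq devices; partitions
 * and non-blk-mq queues are rejected by this callout.
 */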
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				  sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct block_device *bdev = dev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct request_queue *q = bdev_get_queue(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/* request-based cannot stack on partitions! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (bdev_is_partition(bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	return queue_is_mq(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
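/*
 * Decide how this table will issue I/O.  All targets must agree: mixing
 * bio-based and request-based targets in one table is rejected.  Hybrid
 * targets (those offering both paths, e.g. multipath) defer to the type of
 * the live device.  A bio-based table is upgraded to DM_TYPE_DAX_BIO_BASED
 * when all of its devices support DAX, and a request-based table is only
 * accepted for a single, immutable, non-splitting target stacked on whole
 * blk-mq devices (see the verify_rq_based checks below).
 */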
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) static int dm_table_determine_type(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	struct dm_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	struct list_head *devices = dm_table_get_devices(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	int page_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (t->type != DM_TYPE_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		/* target already set the table's type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		if (t->type == DM_TYPE_BIO_BASED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			/* possibly upgrade to a variant of bio-based */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			goto verify_bio_based;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		goto verify_rq_based;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	for (i = 0; i < t->num_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		tgt = t->targets + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		if (dm_target_hybrid(tgt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			hybrid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		else if (dm_target_request_based(tgt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			request_based = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			bio_based = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		if (bio_based && request_based) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			DMERR("Inconsistent table: different target types can't be mixed up");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (hybrid && !bio_based && !request_based) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		 * The targets can work either way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 * Determine the type from the live device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 * Default to bio-based if device is new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		if (__table_type_request_based(live_md_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			request_based = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			bio_based = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (bio_based) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) verify_bio_based:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		/* We must use this table as bio-based */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		t->type = DM_TYPE_BIO_BASED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			t->type = DM_TYPE_DAX_BIO_BASED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	BUG_ON(!request_based); /* No targets in this table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	t->type = DM_TYPE_REQUEST_BASED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) verify_rq_based:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * Request-based dm currently supports only tables with a single target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * Supporting multiple targets would require request splitting, which in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 * turn needs substantial block-layer changes (e.g. a request completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 * path that copes with partial completion).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (t->num_targets > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		DMERR("request-based DM doesn't support multiple targets");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (list_empty(devices)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		int srcu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		/* inherit live table's type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		if (live_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			t->type = live_table->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		dm_put_live_table(t->md, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	tgt = dm_table_get_immutable_target(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	if (!tgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		DMERR("table load rejected: immutable target is required");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	} else if (tgt->max_io_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		DMERR("table load rejected: immutable target that splits IO is not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	/* Non-request-stackable devices can't be used for request-based dm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (!tgt->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	    !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		DMERR("table load rejected: including non-request-stackable devices");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) enum dm_queue_mode dm_table_get_type(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	return t->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return t->immutable_target_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/* Immutable target is implicitly a singleton */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	if (t->num_targets > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	    !dm_target_is_immutable(t->targets[0].type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	return t->targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		if (dm_target_is_wildcard(ti->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			return ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) bool dm_table_bio_based(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	return __table_type_bio_based(dm_table_get_type(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) bool dm_table_request_based(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	return __table_type_request_based(dm_table_get_type(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
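/*
 * Size the mapped device's mempools from the table: for bio-based tables,
 * per_io_data_size is the largest any target requested and min_pool_size
 * must cover the largest num_flush_bios; request-based tables skip the
 * per-target scan and take dm_alloc_md_mempools()'s defaults.
 */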
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	enum dm_queue_mode type = dm_table_get_type(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	unsigned per_io_data_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	unsigned min_pool_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (unlikely(type == DM_TYPE_NONE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		DMWARN("no table type is set, can't allocate mempools");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	if (__table_type_bio_based(type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		for (i = 0; i < t->num_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			ti = t->targets + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			min_pool_size = max(min_pool_size, ti->num_flush_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 					   per_io_data_size, min_pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (!t->mempools)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) void dm_table_free_md_mempools(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	dm_free_md_mempools(t->mempools);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	t->mempools = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	return t->mempools;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
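/*
 * The lookup tree is stored as flat arrays of sector_t keys, one array per
 * level, with KEYS_PER_NODE keys (hence CHILDREN_PER_NODE children) per
 * node; t->counts[] holds each level's node count and t->index[] points at
 * each level's first key.
 *
 * Rough worked example (assuming 64-byte L1_CACHE_BYTES and an 8-byte
 * sector_t, i.e. KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9): a table
 * with 100 targets needs ceil(100 / 8) == 13 leaf nodes, giving a depth of
 * 3 with per-level counts of { 1, 2, 13 } from root to leaves.
 */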
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static int setup_indexes(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	unsigned int total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	sector_t *indexes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	/* allocate the space for *all* the indexes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	for (i = t->depth - 2; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		total += t->counts[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (!indexes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	/* set up internal nodes, bottom-up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	for (i = t->depth - 2; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		t->index[i] = indexes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		indexes += (KEYS_PER_NODE * t->counts[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		setup_btree_index(i, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * Builds the btree to index the map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static int dm_table_build_index(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	unsigned int leaf_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	/* how many indexes will the btree have? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	/* leaf layer has already been set up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	t->counts[t->depth - 1] = leaf_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	t->index[t->depth - 1] = t->highs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	if (t->depth >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		r = setup_indexes(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static bool integrity_profile_exists(struct gendisk *disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	return !!blk_get_integrity(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  * Get a disk whose integrity profile reflects the table's profile.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)  * Returns NULL if integrity support was inconsistent or unavailable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	struct list_head *devices = dm_table_get_devices(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	struct dm_dev_internal *dd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	struct gendisk *prev_disk = NULL, *template_disk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		struct dm_target *ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (!dm_target_passes_integrity(ti->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			goto no_integrity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	list_for_each_entry(dd, devices, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		template_disk = dd->dm_dev->bdev->bd_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		if (!integrity_profile_exists(template_disk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			goto no_integrity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		else if (prev_disk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			 blk_integrity_compare(prev_disk, template_disk) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			goto no_integrity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		prev_disk = template_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	return template_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) no_integrity:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (prev_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		DMWARN("%s: integrity not set: %s and %s profile mismatch",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		       dm_device_name(t->md),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		       prev_disk->disk_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		       template_disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)  * Register the mapped device for blk_integrity support if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * underlying devices have an integrity profile.  But the devices may not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  * all have matching profiles (checking every device isn't reliable during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  * table load because this table may use other DM device(s) which must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  * resumed before they will have an initialized integrity profile).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  * Consequently, stacked DM devices force a two-stage integrity profile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)  * validation: a first pass during table load and a final pass during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)  * resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static int dm_table_register_integrity(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	struct mapped_device *md = t->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	struct gendisk *template_disk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	/* If target handles integrity itself do not register it here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (t->integrity_added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	template_disk = dm_table_get_integrity_disk(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (!template_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (!integrity_profile_exists(dm_disk(md))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		t->integrity_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		 * Register integrity profile during table load; we can do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		 * this because the final profile must match during resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		blk_integrity_register(dm_disk(md),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 				       blk_get_integrity(template_disk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	 * If DM device already has an initialized integrity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	 * profile the new profile should not conflict.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		DMWARN("%s: conflict with existing integrity profile: %s profile mismatch",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		       dm_device_name(t->md),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		       template_disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/* Preserve existing integrity profile */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	t->integrity_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
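/*
 * Inline encryption support: while a table is being set up it carries a
 * keyslot manager (t->ksm) describing the crypto capabilities common to all
 * of its underlying devices.  The stubs under the #else branch below keep
 * the rest of this file oblivious to CONFIG_BLK_INLINE_ENCRYPTION.
 */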
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) #ifdef CONFIG_BLK_INLINE_ENCRYPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct dm_keyslot_manager {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct blk_keyslot_manager ksm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	struct mapped_device *md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct dm_keyslot_evict_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	const struct blk_crypto_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 				     sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	struct dm_keyslot_evict_args *args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	if (!args->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		args->err = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	/* Always try to evict the key from all devices. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * When an inline encryption key is evicted from a device-mapper device, evict
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  * it from all the underlying devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			    const struct blk_crypto_key *key, unsigned int slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	struct dm_keyslot_manager *dksm = container_of(ksm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 						       struct dm_keyslot_manager,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 						       ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	struct mapped_device *md = dksm->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	struct dm_keyslot_evict_args args = { key };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	struct dm_table *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	int srcu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	t = dm_get_live_table(md, &srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		if (!ti->type->iterate_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	dm_put_live_table(md, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	return args.err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct dm_derive_raw_secret_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	const u8 *wrapped_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	unsigned int wrapped_key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	u8 *secret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	unsigned int secret_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int dm_derive_raw_secret_callback(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 					 struct dm_dev *dev, sector_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 					 sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	struct dm_derive_raw_secret_args *args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	if (!args->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (!q->ksm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		args->err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	args->err = blk_ksm_derive_raw_secret(q->ksm, args->wrapped_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 					      args->wrapped_key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 					      args->secret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 					      args->secret_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	/* Try another device in case this fails. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  * Retrieve the raw_secret from the underlying device.  Given that only one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  * raw_secret can exist for a particular wrapped_key, retrieve it only from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  * first device that supports derive_raw_secret().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static int dm_derive_raw_secret(struct blk_keyslot_manager *ksm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				const u8 *wrapped_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 				unsigned int wrapped_key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				u8 *secret, unsigned int secret_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	struct dm_keyslot_manager *dksm = container_of(ksm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 						       struct dm_keyslot_manager,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 						       ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	struct mapped_device *md = dksm->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	struct dm_derive_raw_secret_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		.wrapped_key = wrapped_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		.wrapped_key_size = wrapped_key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		.secret = secret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		.secret_size = secret_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		.err = -EOPNOTSUPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	struct dm_table *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	int srcu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	t = dm_get_live_table(md, &srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		if (!ti->type->iterate_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		ti->type->iterate_devices(ti, dm_derive_raw_secret_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 					  &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		if (!args.err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	dm_put_live_table(md, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return args.err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static struct blk_ksm_ll_ops dm_ksm_ll_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	.keyslot_evict = dm_keyslot_evict,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	.derive_raw_secret = dm_derive_raw_secret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static int device_intersect_crypto_modes(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 					 struct dm_dev *dev, sector_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 					 sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	struct blk_keyslot_manager *parent = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	blk_ksm_intersect_modes(parent, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	struct dm_keyslot_manager *dksm = container_of(ksm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 						       struct dm_keyslot_manager,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 						       ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (!ksm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	blk_ksm_destroy(ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	kfree(dksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) static void dm_table_destroy_keyslot_manager(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	dm_destroy_keyslot_manager(t->ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	t->ksm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * Constructs and initializes t->ksm with a keyslot manager that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  * represents the common set of crypto capabilities of the devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * described by the dm_table. However, if the constructed keyslot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  * manager does not support a superset of the crypto capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)  * supported by the current keyslot manager of the mapped_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)  * it returns an error instead, since we don't support restricting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)  * crypto capabilities on table changes. Finally, if the constructed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)  * keyslot manager doesn't actually support any crypto modes at all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)  * it simply leaves t->ksm set to NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static int dm_table_construct_keyslot_manager(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	struct dm_keyslot_manager *dksm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	struct blk_keyslot_manager *ksm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	bool ksm_is_empty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	if (!dksm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	dksm->md = t->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	ksm = &dksm->ksm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	blk_ksm_init_passthrough(ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	ksm->ksm_ll_ops = dm_ksm_ll_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	ksm->max_dun_bytes_supported = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	memset(ksm->crypto_modes_supported, 0xFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	       sizeof(ksm->crypto_modes_supported));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	ksm->features = BLK_CRYPTO_FEATURE_STANDARD_KEYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		if (!dm_target_passes_crypto(ti->type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			blk_ksm_intersect_modes(ksm, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		if (!ti->type->iterate_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		ti->type->iterate_devices(ti, device_intersect_crypto_modes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 					  ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		dm_destroy_keyslot_manager(ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	 * If the new KSM doesn't actually support any crypto modes, we may as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	 * well represent it with a NULL ksm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	ksm_is_empty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		if (ksm->crypto_modes_supported[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 			ksm_is_empty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (ksm_is_empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		dm_destroy_keyslot_manager(ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		ksm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	 * t->ksm is only set temporarily while the table is being set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	 * up, and it gets set to NULL after the capabilities have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	 * been transferred to the request_queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	t->ksm = ksm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
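/*
 * Hand the capabilities collected in t->ksm over to the request_queue:
 * register t->ksm directly if the queue has no keyslot manager yet,
 * otherwise widen the queue's existing one; either way the table's
 * temporary t->ksm reference is dropped.
 */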
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static void dm_update_keyslot_manager(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 				      struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (!t->ksm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	/* Make the ksm less restrictive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	if (!q->ksm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		blk_ksm_register(t->ksm, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		blk_ksm_update_capabilities(q->ksm, t->ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		dm_destroy_keyslot_manager(t->ksm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	t->ksm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) #else /* CONFIG_BLK_INLINE_ENCRYPTION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static int dm_table_construct_keyslot_manager(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static void dm_table_destroy_keyslot_manager(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static void dm_update_keyslot_manager(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 				      struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  * Prepares the table for use by determining its type, building the btree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  * indices, setting up integrity and crypto profiles, and allocating mempools.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) int dm_table_complete(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	r = dm_table_determine_type(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		DMERR("unable to determine table type");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	r = dm_table_build_index(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		DMERR("unable to build btrees");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	r = dm_table_register_integrity(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		DMERR("could not register integrity profile.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	r = dm_table_construct_keyslot_manager(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		DMERR("could not construct keyslot manager.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	r = dm_table_alloc_md_mempools(t, t->md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		DMERR("unable to allocate mempools");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
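/*
 * A table can have a single event callback registered against it;
 * dm_table_event() invokes it under _event_lock so that registration and
 * delivery never race.
 */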
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static DEFINE_MUTEX(_event_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) void dm_table_event_callback(struct dm_table *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			     void (*fn)(void *), void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	mutex_lock(&_event_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	t->event_fn = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	t->event_context = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	mutex_unlock(&_event_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) void dm_table_event(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	mutex_lock(&_event_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (t->event_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		t->event_fn(t->event_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	mutex_unlock(&_event_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) EXPORT_SYMBOL(dm_table_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
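/*
 * t->highs[] records the last sector of each target, so the table size is
 * simply one past the high sector of the final target.
 */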
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) inline sector_t dm_table_get_size(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) EXPORT_SYMBOL(dm_table_get_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (index >= t->num_targets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	return t->targets + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
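/*
 * The walk below visits one node per level: at each level the first key
 * >= @sector selects the child to descend into, and after the leaf level
 * (n, k) indexes directly into t->targets[].
 */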
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)  * Search the btree for the correct target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)  * Caller should check returned pointer for NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  * to trap I/O beyond end of device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	unsigned int l, n = 0, k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	sector_t *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (unlikely(sector >= dm_table_get_size(t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	for (l = 0; l < t->depth; l++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		n = get_child(n, k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		node = get_node(t, l, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		for (k = 0; k < KEYS_PER_NODE; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 			if (node[k] >= sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	return &t->targets[(KEYS_PER_NODE * n) + k];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * type->iterate_devices() should be called when the sanity check needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  * iterate and check all underlying data devices. iterate_devices() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  * iterate all underlying data devices until it encounters a non-zero return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  * code, whether returned by the supplied iterate_devices_callout_fn or by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)  * iterate_devices() itself internally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  * For some target types (e.g. dm-stripe), one call of iterate_devices() may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)  * iterate multiple underlying devices internally, in which case a non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)  * return code returned by iterate_devices_callout_fn will stop the iteration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  * early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)  * Cases requiring _any_ underlying device to support some kind of attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)  * should use an iteration structure like dm_table_any_dev_attr(), or call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  * it directly. @func should handle the semantics of positive examples, e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  * being capable of something.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)  * Cases requiring _all_ underlying devices to support some kind of attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)  * should use an iteration structure like dm_table_supports_nowait() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)  * dm_table_supports_discards(). Alternatively, introduce a dm_table_all_devs_attr()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)  * that uses an @anti_func handling the semantics of counter-examples, e.g. not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)  * being capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static bool dm_table_any_dev_attr(struct dm_table *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 				  iterate_devices_callout_fn func, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		if (ti->type->iterate_devices &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		    ti->type->iterate_devices(ti, func, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
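/*
 * A minimal sketch of the dm_table_all_devs_attr() helper that the comment
 * above dm_table_any_dev_attr() alludes to; it is not defined elsewhere in
 * this file. The name and the @anti_func parameter are assumptions taken
 * from that comment: @anti_func must return non-zero for a device that does
 * NOT have the attribute, so "all devices have it" is simply the negation of
 * "any device lacks it".
 */
static inline bool dm_table_all_devs_attr(struct dm_table *t,
					  iterate_devices_callout_fn anti_func,
					  void *data)
{
	/* No device may satisfy the counter-example predicate. */
	return !dm_table_any_dev_attr(t, anti_func, data);
}
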
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static int count_device(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	unsigned *num_devices = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	(*num_devices)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)  * Check whether a table has no data devices attached using each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  * target's iterate_devices method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)  * Returns false if the result is unknown because a target doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)  * support iterate_devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) bool dm_table_has_no_data_devices(struct dm_table *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	unsigned i, num_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		ti = dm_table_get_target(table, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		if (!ti->type->iterate_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		num_devices = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		ti->type->iterate_devices(ti, count_device, &num_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		if (num_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 				  sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	enum blk_zoned_model *zoned_model = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	return !q || blk_queue_zoned_model(q) != *zoned_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)  * Check the device zoned model based on the target feature flag. If the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)  * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * also accepted but all devices must have the same zoned model. If the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * zoned model with all zoned devices having the same zone size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static bool dm_table_supports_zoned_model(struct dm_table *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 					  enum blk_zoned_model zoned_model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		if (dm_target_supports_zoned_hm(ti->type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			if (!ti->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			    ti->type->iterate_devices(ti, device_not_zoned_model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 						      &zoned_model))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			if (zoned_model == BLK_ZONED_HM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
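/*
 * For illustration only (assumed, not taken from this file): a target opts in
 * to host-managed zoned devices by setting the corresponding feature flag in
 * its struct target_type, roughly:
 *
 *	static struct target_type example_zoned_target = {
 *		.name     = "example-zoned",
 *		.features = DM_TARGET_ZONED_HM,
 *		...
 *	};
 *
 * DM_TARGET_MIXED_ZONED_MODEL is set the same way by targets that can stack
 * devices with different zoned models. The target name here is hypothetical.
 */
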
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 					   sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	unsigned int *zone_sectors = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	if (!blk_queue_is_zoned(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	return blk_queue_zone_sectors(q) != *zone_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)  * Check consistency of zoned model and zone sectors across all targets. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)  * zone sectors, if the destination device is a zoned block device, it shall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)  * have the specified zone_sectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) static int validate_hardware_zoned_model(struct dm_table *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 					 enum blk_zoned_model zoned_model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 					 unsigned int zone_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	if (zoned_model == BLK_ZONED_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	if (!dm_table_supports_zoned_model(table, zoned_model)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		DMERR("%s: zoned model is not consistent across all devices",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		      dm_device_name(table->md));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	/* Check zone size validity and compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	if (!zone_sectors || !is_power_of_2(zone_sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		DMERR("%s: zone sectors is not consistent across all zoned devices",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		      dm_device_name(table->md));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * Establish the new table's queue_limits and validate them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) int dm_calculate_queue_limits(struct dm_table *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			      struct queue_limits *limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	struct queue_limits ti_limits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	unsigned int zone_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	blk_set_stacking_limits(limits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		blk_set_stacking_limits(&ti_limits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		ti = dm_table_get_target(table, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		if (!ti->type->iterate_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 			goto combine_limits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		 * Combine queue limits of all the devices this target uses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		ti->type->iterate_devices(ti, dm_set_device_limits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 					  &ti_limits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			 * After stacking all limits, validate that all devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			 * in the table support this zoned model and zone sectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			zoned_model = ti_limits.zoned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			zone_sectors = ti_limits.chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		/* Set I/O hints portion of queue limits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		if (ti->type->io_hints)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			ti->type->io_hints(ti, &ti_limits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		 * Check each device area is consistent with the target's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		 * overall queue limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		if (ti->type->iterate_devices(ti, device_area_is_invalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 					      &ti_limits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) combine_limits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		 * Merge this target's queue limits into the overall limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		 * for the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			DMWARN("%s: adding target device "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			       "(start sect %llu len %llu) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 			       "caused an alignment inconsistency",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			       dm_device_name(table->md),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 			       (unsigned long long) ti->begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 			       (unsigned long long) ti->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	 * Verify that the zoned model and zone sectors, as determined before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	 * any .io_hints override, are the same across all devices in the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	 * - this is especially relevant if .io_hints is emulating a drive-managed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	 * BUT...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (limits->zoned != BLK_ZONED_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		 * ...IF the above limits stacking determined a zoned model
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		 * validate that all of the table's devices conform to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		zoned_model = limits->zoned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		zone_sectors = limits->chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	return validate_hardware_logical_block_alignment(table, limits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
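/*
 * Illustrative sketch (assumed caller, not code from this file): a caller
 * binding a new table typically computes the limits first and only then
 * applies them to the mapped device's queue, roughly:
 *
 *	struct queue_limits limits;
 *	int r;
 *
 *	r = dm_calculate_queue_limits(table, &limits);
 *	if (r)
 *		return r;	(invalid or misaligned device areas)
 *	dm_table_set_restrictions(table, md->queue, &limits);
 *
 * dm_table_set_restrictions() is defined further down in this file.
 */
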
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)  * Verify that all devices have an integrity profile that matches the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)  * DM device's registered integrity profile.  If the profiles don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * match then unregister the DM device's integrity profile.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static void dm_table_verify_integrity(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	struct gendisk *template_disk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	if (t->integrity_added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	if (t->integrity_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		 * Verify that the original integrity profile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		 * matches all the devices in this table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		template_disk = dm_table_get_integrity_disk(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		if (template_disk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	if (integrity_profile_exists(dm_disk(t->md))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		DMWARN("%s: unable to establish an integrity profile",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		       dm_device_name(t->md));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		blk_integrity_unregister(dm_disk(t->md));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 				sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	unsigned long flush = (unsigned long) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	return q && (q->queue_flags & flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	 * Require at least one underlying device to support flushes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	 * t->devices includes internal dm devices such as mirror logs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	 * so we need to use iterate_devices here, which targets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	 * supporting flushes must provide.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		if (!ti->num_flush_bios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		if (ti->flush_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		if (ti->type->iterate_devices &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static int device_dax_write_cache_enabled(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 					  struct dm_dev *dev, sector_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 					  sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	struct dax_device *dax_dev = dev->dax_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	if (!dax_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	if (dax_write_cache_enabled(dax_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 				sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	return q && !blk_queue_nonrot(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			     sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	return q && !blk_queue_add_random(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 					 sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	return q && !q->limits.max_write_same_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) static bool dm_table_supports_write_same(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		if (!ti->num_write_same_bios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		if (!ti->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 					   sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	return q && !q->limits.max_write_zeroes_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) static bool dm_table_supports_write_zeroes(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	unsigned i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	while (i < dm_table_get_num_targets(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		ti = dm_table_get_target(t, i++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		if (!ti->num_write_zeroes_bios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		if (!ti->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 				     sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	return q && !blk_queue_nowait(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) static bool dm_table_supports_nowait(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	unsigned i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	while (i < dm_table_get_num_targets(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		ti = dm_table_get_target(t, i++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		if (!dm_target_supports_nowait(ti->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		if (!ti->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 				      sector_t start, sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	return q && !blk_queue_discard(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static bool dm_table_supports_discards(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		if (!ti->num_discard_bios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		 * Either the target provides discard support (as implied by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		 * 'discards_supported') or it relies on _all_ data devices having
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		 * discard support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		if (!ti->discards_supported &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		    (!ti->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static int device_not_secure_erase_capable(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 					   struct dm_dev *dev, sector_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 					   sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	return q && !blk_queue_secure_erase(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static bool dm_table_supports_secure_erase(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	struct dm_target *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		ti = dm_table_get_target(t, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		if (!ti->num_secure_erase_bios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		if (!ti->type->iterate_devices ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) static int device_requires_stable_pages(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 					struct dm_dev *dev, sector_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 					sector_t len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	struct request_queue *q = bdev_get_queue(dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	return q && blk_queue_stable_writes(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			       struct queue_limits *limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	bool wc = false, fua = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	int page_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	 * Copy table's limits to the DM device's request_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	q->limits = *limits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	if (dm_table_supports_nowait(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	if (!dm_table_supports_discards(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		/* Must also clear discard limits... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		q->limits.max_discard_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		q->limits.max_hw_discard_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		q->limits.discard_granularity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		q->limits.discard_alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		q->limits.discard_misaligned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (dm_table_supports_secure_erase(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		wc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 			fua = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	blk_queue_write_cache(q, wc, fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			set_dax_synchronous(t->md->dax_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		dax_write_cache(t->md->dax_dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	/* Ensure that all underlying devices are non-rotational. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	if (!dm_table_supports_write_same(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		q->limits.max_write_same_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	if (!dm_table_supports_write_zeroes(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		q->limits.max_write_zeroes_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	dm_table_verify_integrity(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	 * Some devices don't use blk_integrity but still want stable pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	 * because they do their own checksumming.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	 * If any underlying device requires stable pages, a table must require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	 * them as well.  Only targets that support iterate_devices are considered:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	 * we don't want error, zero, etc. to require stable pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	 * Determine whether or not this queue's I/O timings contribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	 * to the entropy pool. Only request-based targets use this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	 * have it set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	if (blk_queue_add_random(q) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	 * For a zoned target, the number of zones should be updated for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	 * correct value to be exposed in sysfs queue/nr_zones. For a BIO based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	 * target, this is all that is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) #ifdef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	if (blk_queue_is_zoned(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		WARN_ON_ONCE(queue_is_mq(q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		q->nr_zones = blkdev_nr_zones(t->md->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	dm_update_keyslot_manager(q, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	blk_queue_update_readahead(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) unsigned int dm_table_get_num_targets(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	return t->num_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) struct list_head *dm_table_get_devices(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	return &t->devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) fmode_t dm_table_get_mode(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	return t->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) EXPORT_SYMBOL(dm_table_get_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) enum suspend_mode {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	PRESUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	PRESUSPEND_UNDO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	POSTSUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	int i = t->num_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	struct dm_target *ti = t->targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	lockdep_assert_held(&t->md->suspend_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		case PRESUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			if (ti->type->presuspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 				ti->type->presuspend(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		case PRESUSPEND_UNDO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			if (ti->type->presuspend_undo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 				ti->type->presuspend_undo(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		case POSTSUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			if (ti->type->postsuspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 				ti->type->postsuspend(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		ti++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) void dm_table_presuspend_targets(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	suspend_targets(t, PRESUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) void dm_table_presuspend_undo_targets(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	suspend_targets(t, PRESUSPEND_UNDO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) void dm_table_postsuspend_targets(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	suspend_targets(t, POSTSUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) int dm_table_resume_targets(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	int i, r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	lockdep_assert_held(&t->md->suspend_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	for (i = 0; i < t->num_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		struct dm_target *ti = t->targets + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		if (!ti->type->preresume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		r = ti->type->preresume(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			DMERR("%s: %s: preresume failed, error = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			      dm_device_name(t->md), ti->type->name, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	for (i = 0; i < t->num_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		struct dm_target *ti = t->targets + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		if (ti->type->resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 			ti->type->resume(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
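/*
 * For illustration only (hypothetical names, not from this file): a target
 * takes part in the suspend/resume protocol driven by suspend_targets() and
 * dm_table_resume_targets() by filling in the optional hooks of its
 * struct target_type, roughly:
 *
 *	static struct target_type example_target = {
 *		.name            = "example",
 *		.presuspend      = example_presuspend,
 *		.presuspend_undo = example_presuspend_undo,
 *		.postsuspend     = example_postsuspend,
 *		.preresume       = example_preresume,
 *		.resume          = example_resume,
 *		...
 *	};
 *
 * Any hook left NULL is simply skipped, and a non-zero return from .preresume
 * aborts the resume, as the iteration code above shows.
 */
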
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) struct mapped_device *dm_table_get_md(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	return t->md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) EXPORT_SYMBOL(dm_table_get_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) const char *dm_table_device_name(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	return dm_device_name(t->md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) EXPORT_SYMBOL_GPL(dm_table_device_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) void dm_table_run_md_queue_async(struct dm_table *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	if (!dm_table_request_based(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (t->md->queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		blk_mq_run_hw_queues(t->md->queue, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) EXPORT_SYMBOL(dm_table_run_md_queue_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)