/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

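/*
 * The snapshot-merge target is assumed to register its target_type with
 * exactly this string, so comparing the name pointers below is enough to
 * recognise it.
 */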
#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

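/*
 * The tracked-chunk table (tracked_chunk_hash in struct dm_snapshot below)
 * has DM_TRACKED_CHUNK_HASH_SIZE (16) buckets; DM_TRACKED_CHUNK_HASH() keys
 * on the low four bits of the chunk number.
 */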
#define DM_TRACKED_CHUNK_HASH_SIZE 16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct hlist_bl_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	spinlock_t pe_allocation_lock;

	/* Protected by "pe_allocation_lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct rb_root out_of_order_tree;

	mempool_t pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	unsigned in_progress;
	struct wait_queue_head in_progress_wait;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *   => don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *   => use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *   => don't load the target; abort.
	 *   (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *   => stop merging; set merge_failed; process I/O normally.
	 */
	bool merge_failed:1;

	bool discard_zeroes_cow:1;
	bool discard_passdown_origin:1;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;

	/*
	 * Flush data after merge.
	 */
	struct bio flush_bio;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1

/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was a copying error. */
	int copy_error;

	/* A sequence number used for in-order completion. */
	sector_t exception_sequence;

	struct rb_node out_of_order_node;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};

/*
 * Slab caches for the completed and pending exception structures.
 * (The hash table mapping origin volumes to lists of snapshots, and the
 * lock protecting it, are declared further below: _origins/_origins_lock.)
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

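/*
 * These helpers record, in the per-bio data, which chunk a bio is reading so
 * that __check_for_conflicting_io() below can wait for outstanding reads on a
 * chunk to drain (see tracked_chunk_hash in struct dm_snapshot).
 */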
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
			     &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the _origins hash.
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK 0xFF
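
/*
 * Two parallel hash tables indexed by origin_hash(): _origins holds struct
 * origin entries and _dm_origins holds struct dm_origin entries.  Both are
 * accessed under _origins_lock.
 */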
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
				 GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
				    sizeof(struct list_head),
				    GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];

	list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
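/*
 * For example, with a hash_shift of 3, chunk numbers 0-7 all map to the same
 * slot in exception_hash() below, so an exception covering a run of
 * consecutive chunks can usually be found in a single slot.
 */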
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);

/* Lock to protect access to the completed and pending exception hash tables. */
struct dm_exception_table_lock {
	struct hlist_bl_head *complete_slot;
	struct hlist_bl_head *pending_slot;
};

static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
					 struct dm_exception_table_lock *lock)
{
	struct dm_exception_table *complete = &s->complete;
	struct dm_exception_table *pending = &s->pending;

	lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
	lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
}

static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{
	hlist_bl_lock(lock->complete_slot);
	hlist_bl_lock(lock->pending_slot);
}

static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{
	hlist_bl_unlock(lock->pending_slot);
	hlist_bl_unlock(lock->complete_slot);
}

static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_HLIST_BL_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct hlist_bl_head *slot;
	struct dm_exception *ex;
	struct hlist_bl_node *pos, *n;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	hlist_bl_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct hlist_bl_head *slot;
	struct hlist_bl_node *pos;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	hlist_bl_for_each_entry(e, pos, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

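/*
 * Insert a completed exception into its hash slot, keeping the slot ordered
 * by old_chunk.  When the table supports consecutive chunks (hash_shift != 0)
 * and the new exception is exactly adjacent to an existing one - e.g. old
 * chunk 13 mapping to new chunk 23, next to an entry covering old chunks
 * 10-12 mapped to 20-22 - the existing entry's consecutive chunk count is
 * bumped instead of adding a separate node.
 */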
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static void dm_insert_exception(struct dm_exception_table *eh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) struct dm_exception *new_e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct hlist_bl_head *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct hlist_bl_node *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct dm_exception *e = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) l = &eh->table[exception_hash(eh, new_e->old_chunk)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* Add immediately if this table doesn't support consecutive chunks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (!eh->hash_shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* List is ordered by old_chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) hlist_bl_for_each_entry(e, pos, l, hash_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* Insert after an existing chunk? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (new_e->old_chunk == (e->old_chunk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) dm_consecutive_chunk_count(e) + 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) dm_consecutive_chunk_count(e) + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) dm_consecutive_chunk_count_inc(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) free_completed_exception(new_e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* Insert before an existing chunk? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (new_e->old_chunk == (e->old_chunk - 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) dm_consecutive_chunk_count_inc(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) e->old_chunk--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) e->new_chunk--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) free_completed_exception(new_e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (new_e->old_chunk < e->old_chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * Either the table doesn't support consecutive chunks or slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * l is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) hlist_bl_add_head(&new_e->hash_list, l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) } else if (new_e->old_chunk < e->old_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /* Add before an existing exception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* Add to l's tail: e is the last exception in this slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * Callback used by the exception stores to load exceptions when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * initialising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static int dm_add_exception(void *context, chunk_t old, chunk_t new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct dm_exception_table_lock lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct dm_snapshot *s = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct dm_exception *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) e = alloc_completed_exception(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) e->old_chunk = old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* Consecutive_count is implicitly initialised to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) e->new_chunk = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Although there is no need to lock access to the exception tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * here, if we don't then hlist_bl_add_head(), called by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * dm_insert_exception(), will complain about accessing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * corresponding list without locking it first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) dm_exception_table_lock_init(s, old, &lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) dm_insert_exception(&s->complete, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * Return the minimum chunk size of all snapshots that have the specified origin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * Return zero if the origin has no snapshots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static uint32_t __minimum_chunk_size(struct origin *o)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct dm_snapshot *snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (o)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) list_for_each_entry(snap, &o->snapshots, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) chunk_size = min_not_zero(chunk_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) snap->store->chunk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return (uint32_t) chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Hard-coded cap: use at most 2MB of memory for hash table bucket heads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static int calc_max_buckets(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* use a fixed size of 2MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) unsigned long mem = 2 * 1024 * 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) mem /= sizeof(struct hlist_bl_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
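/*
 * Editorial note: on a 64-bit build a struct hlist_bl_head is a single
 * pointer, so the 2MB budget above works out to at most 262144 buckets.
 */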
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Allocate room for a suitable hash table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static int init_hash_tables(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) sector_t hash_size, cow_dev_size, max_buckets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * Base the number of buckets on the size of the COW volume:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * one bucket per chunk, capped by calc_max_buckets().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) cow_dev_size = get_dev_size(s->cow->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) max_buckets = calc_max_buckets();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) hash_size = cow_dev_size >> s->store->chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) hash_size = min(hash_size, max_buckets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (hash_size < 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) hash_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) hash_size = rounddown_pow_of_two(hash_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (dm_exception_table_init(&s->complete, hash_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) DM_CHUNK_CONSECUTIVE_BITS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * Allocate the hash table for in-flight (pending) exceptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * Make it smaller than the completed-exception hash table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) hash_size >>= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (hash_size < 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) hash_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (dm_exception_table_init(&s->pending, hash_size, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) dm_exception_table_exit(&s->complete, exception_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
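/*
 * Editorial sizing sketch (illustrative figures, not from the original
 * source): a 16GiB COW device is 33554432 sectors; with 64KiB chunks
 * (128 sectors, chunk_shift 7) that gives 262144 chunks, which matches
 * the calc_max_buckets() cap noted above.  The completed-exception
 * table then gets 262144 buckets and the pending table 262144 >> 3 =
 * 32768, both already powers of two and above the 64-bucket floor.
 */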
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void merge_shutdown(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) wake_up_bit(&s->state_bits, RUNNING_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
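/*
 * Editorial note: this helper resets the current merging window and
 * returns whatever bios were queued against it while the merge was in
 * progress; callers submit or error them after dropping s->lock.  The
 * double-underscore prefix and both call sites below suggest it is
 * always called with s->lock held for writing.
 */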
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) s->first_merging_chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) s->num_merging_chunks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return bio_list_get(&s->bios_queued_during_merge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * Remove one chunk from the index of completed exceptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static int __remove_single_exception_chunk(struct dm_snapshot *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) chunk_t old_chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct dm_exception *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) e = dm_lookup_exception(&s->complete, old_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) (unsigned long long)old_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * If this is the only chunk covered by this exception, remove the exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (!dm_consecutive_chunk_count(e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) dm_remove_exception(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) free_completed_exception(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * The chunk may be either at the beginning or the end of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * group of consecutive chunks - never in the middle. We are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * removing chunks in the opposite order to that in which they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * were added, so this should always be true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Decrement the consecutive chunk counter and adjust the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * starting point if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (old_chunk == e->old_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) e->old_chunk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) e->new_chunk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) } else if (old_chunk != e->old_chunk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) dm_consecutive_chunk_count(e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) (unsigned long long)old_chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) (unsigned long long)e->old_chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) (unsigned long long)e->old_chunk + dm_consecutive_chunk_count(e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) dm_consecutive_chunk_count_dec(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static void flush_bios(struct bio *bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static int remove_single_exception_chunk(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct bio *b = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * Process chunks (and associated exceptions) in reverse order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * so that dm_consecutive_chunk_count_dec() accounting works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) r = __remove_single_exception_chunk(s, old_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) } while (old_chunk-- > s->first_merging_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) b = __release_queued_bios_after_merge(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) flush_bios(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int origin_write_extent(struct dm_snapshot *merging_snap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) sector_t sector, unsigned chunk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static void merge_callback(int read_err, unsigned long write_err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) void *context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
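/*
 * Editorial note: _pending_exceptions_done_count is a global counter
 * (declared earlier in this file) protected by
 * _pending_exceptions_done_spinlock.  The merge path below samples it
 * and sleeps on the _pending_exceptions_done waitqueue until the count
 * changes, i.e. until some pending exception anywhere in the system has
 * completed and bumped it via increment_pending_exceptions_done_count().
 */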
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static uint64_t read_pending_exceptions_done_count(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) uint64_t pending_exceptions_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) spin_lock(&_pending_exceptions_done_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) pending_exceptions_done = _pending_exceptions_done_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) spin_unlock(&_pending_exceptions_done_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return pending_exceptions_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static void increment_pending_exceptions_done_count(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) spin_lock(&_pending_exceptions_done_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) _pending_exceptions_done_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) spin_unlock(&_pending_exceptions_done_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) wake_up_all(&_pending_exceptions_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
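/*
 * Editorial summary of the merge step below: ask the exception store for
 * the longest run of consecutive ("linear") chunks it can merge, wait
 * until no pending exception or in-flight I/O conflicts with that range,
 * then issue a single kcopyd copy from the COW device back to the
 * origin.  merge_callback() flushes the origin, commits the merge in the
 * exception store and drops the in-core exceptions before starting the
 * next pass.
 */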
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static void snapshot_merge_next_chunks(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) int i, linear_chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) chunk_t old_chunk, new_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct dm_io_region src, dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) sector_t io_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) uint64_t previous_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) goto shut;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * The snapshot's valid flag never changes during a merge, so no lock is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!s->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) DMERR("Snapshot is invalid: can't merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto shut;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) &new_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (linear_chunks <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (linear_chunks < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) DMERR("Read error in exception store: shutting down merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) s->merge_failed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) goto shut;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* Adjust old_chunk and new_chunk to reflect start of linear region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) old_chunk = old_chunk + 1 - linear_chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) new_chunk = new_chunk + 1 - linear_chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * Use one (potentially large) I/O to copy all 'linear_chunks'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * from the exception store to the origin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) io_size = linear_chunks * s->store->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) dest.bdev = s->origin->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) dest.sector = chunk_to_sector(s->store, old_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) src.bdev = s->cow->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) src.sector = chunk_to_sector(s->store, new_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) src.count = dest.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * Reallocate any exceptions needed in other snapshots then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * wait for the pending exceptions to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * Each time any pending exception (globally on the system)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * completes we are woken and repeat the process to find out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * if we can proceed. While this may not seem a particularly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * efficient algorithm, it is not expected to have any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * significant impact on performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) previous_count = read_pending_exceptions_done_count();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) while (origin_write_extent(s, dest.sector, io_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) wait_event(_pending_exceptions_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) (read_pending_exceptions_done_count() !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) previous_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /* Retry after the wait, until all exceptions are done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) previous_count = read_pending_exceptions_done_count();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) s->first_merging_chunk = old_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) s->num_merging_chunks = linear_chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /* Wait until writes to all 'linear_chunks' drain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) for (i = 0; i < linear_chunks; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) __check_for_conflicting_io(s, old_chunk + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 1 << DM_KCOPYD_SNAP_MERGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) merge_callback, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) shut:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) merge_shutdown(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static void error_bios(struct bio *bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
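/*
 * Editorial note: issue an empty REQ_PREFLUSH bio to the origin so the
 * chunks just copied back by kcopyd are on stable storage before
 * merge_callback() commits the merge in the exception store.
 */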
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static int flush_data(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct bio *flush_bio = &s->flush_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) bio_reset(flush_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) bio_set_dev(flush_bio, s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return submit_bio_wait(flush_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static void merge_callback(int read_err, unsigned long write_err, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct dm_snapshot *s = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct bio *b = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (read_err || write_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (read_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) DMERR("Read error: shutting down merge.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) DMERR("Write error: shutting down merge.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) goto shut;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (flush_data(s) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) DMERR("Flush after merge failed: shutting down merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) goto shut;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (s->store->type->commit_merge(s->store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) s->num_merging_chunks) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) DMERR("Write error in exception store: shutting down merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) goto shut;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (remove_single_exception_chunk(s) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) goto shut;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) snapshot_merge_next_chunks(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) shut:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) s->merge_failed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) b = __release_queued_bios_after_merge(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) error_bios(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) merge_shutdown(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
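/*
 * Editorial note on the merge state machine: start_merge() sets
 * RUNNING_MERGE and kicks off snapshot_merge_next_chunks(); the bit
 * stays set until merge_shutdown() clears it and wakes waiters.
 * stop_merge() sets SHUTDOWN_MERGE, which makes the next pass of
 * snapshot_merge_next_chunks() bail out, then waits for RUNNING_MERGE
 * to clear.
 */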
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static void start_merge(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) snapshot_merge_next_chunks(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * Stop the merging process and wait until it finishes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static void stop_merge(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) set_bit(SHUTDOWN_MERGE, &s->state_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) clear_bit(SHUTDOWN_MERGE, &s->state_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) unsigned argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) const char *arg_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {0, 2, "Invalid number of feature arguments"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * No feature arguments supplied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!as->argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) r = dm_read_arg_group(_args, as, &argc, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) while (argc && !r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) arg_name = dm_shift_arg(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (!strcasecmp(arg_name, "discard_zeroes_cow"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) s->discard_zeroes_cow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) else if (!strcasecmp(arg_name, "discard_passdown_origin"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) s->discard_passdown_origin = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ti->error = "Unrecognised feature requested";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * TODO: really these are disjoint... but ti->num_discard_bios
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * and dm_bio_get_target_bio_nr() require rigid constraints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * Construct a snapshot mapping:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) */
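/*
 * Illustrative example (device names and sizes are hypothetical, not
 * from the original source): a persistent snapshot with 8-sector chunks
 * and both optional discard features could be created with a table line
 * such as:
 *
 *   0 <origin_sectors> snapshot /dev/vg/origin /dev/vg/cow P 8 \
 *       2 discard_zeroes_cow discard_passdown_origin
 */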
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct dm_snapshot *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct dm_arg_set as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) char *origin_path, *cow_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) dev_t origin_dev, cow_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) unsigned args_used, num_flush_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) fmode_t origin_mode = FMODE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (argc < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) ti->error = "requires 4 or more arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (dm_target_is_snapshot_merge(ti)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) num_flush_bios = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) origin_mode = FMODE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) s = kzalloc(sizeof(*s), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (!s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) ti->error = "Cannot allocate private snapshot structure";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) as.argc = argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) as.argv = argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) dm_consume_args(&as, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) r = parse_snapshot_features(&as, s, ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) goto bad_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) origin_path = argv[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) argv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) ti->error = "Cannot get origin device";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) goto bad_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) origin_dev = s->origin->bdev->bd_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) cow_path = argv[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) argv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) cow_dev = dm_get_dev_t(cow_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (cow_dev && cow_dev == origin_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ti->error = "COW device cannot be the same as origin device";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) goto bad_cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ti->error = "Cannot get COW device";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) goto bad_cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ti->error = "Couldn't create exception store";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) goto bad_store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) argv += args_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) argc -= args_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) s->ti = ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) s->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) s->snapshot_overflowed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) s->active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) atomic_set(&s->pending_exceptions_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) spin_lock_init(&s->pe_allocation_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) s->exception_start_sequence = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) s->exception_complete_sequence = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) s->out_of_order_tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) init_rwsem(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) INIT_LIST_HEAD(&s->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) spin_lock_init(&s->pe_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) s->state_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) s->merge_failed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) s->first_merging_chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) s->num_merging_chunks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) bio_list_init(&s->bios_queued_during_merge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) bio_init(&s->flush_bio, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* Allocate the exception hash tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (init_hash_tables(s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) ti->error = "Unable to allocate hash table space";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) goto bad_hash_tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) init_waitqueue_head(&s->in_progress_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (IS_ERR(s->kcopyd_client)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) r = PTR_ERR(s->kcopyd_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) ti->error = "Could not create kcopyd client";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) goto bad_kcopyd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) ti->error = "Could not allocate mempool for pending exceptions";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) goto bad_pending_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) spin_lock_init(&s->tracked_chunk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ti->private = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) ti->num_flush_bios = num_flush_bios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (s->discard_zeroes_cow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* Add snapshot to the list of snapshots for this origin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* Exceptions aren't triggered until snapshot_resume() is called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) r = register_snapshot(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (r == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ti->error = "Snapshot origin struct allocation failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) goto bad_load_and_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) } else if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /* invalid handover, register_snapshot has set ti->error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) goto bad_load_and_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Metadata must only be loaded into one table at once, so skip this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * if metadata will be handed over during resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * Chunk size will be set during the handover - set it to zero to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * ensure it's ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (r > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) s->store->chunk_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) r = s->store->type->read_metadata(s->store, dm_add_exception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) (void *)s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ti->error = "Failed to read snapshot metadata";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) goto bad_read_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) } else if (r > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) s->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) DMWARN("Snapshot is marked invalid.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (!s->store->chunk_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ti->error = "Chunk size not set";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto bad_read_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) r = dm_set_target_max_io_len(ti, s->store->chunk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) goto bad_read_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) bad_read_metadata:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) unregister_snapshot(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) bad_load_and_register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) mempool_exit(&s->pending_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) bad_pending_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) dm_kcopyd_client_destroy(s->kcopyd_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) bad_kcopyd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) dm_exception_table_exit(&s->pending, pending_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) dm_exception_table_exit(&s->complete, exception_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) bad_hash_tables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) dm_exception_store_destroy(s->store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) bad_store:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) dm_put_device(ti, s->cow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) bad_cow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) dm_put_device(ti, s->origin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) bad_origin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) bad_features:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static void __free_exceptions(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) dm_kcopyd_client_destroy(s->kcopyd_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) s->kcopyd_client = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) dm_exception_table_exit(&s->pending, pending_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) dm_exception_table_exit(&s->complete, exception_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
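/*
 * Editorial note: exception handover happens when a snapshot target is
 * reloaded.  The destination target inherits the source's
 * completed-exception table and exception store (only the
 * userspace_supports_overflow flag is carried over from the
 * destination's own store), and the source is marked invalid so it
 * accepts no further I/O.  Presumably called with the relevant snapshot
 * locks held.
 */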
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static void __handover_exceptions(struct dm_snapshot *snap_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) struct dm_snapshot *snap_dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct dm_exception_table table_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct dm_exception_store *store_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) } u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * Swap all snapshot context information between the two instances.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) u.table_swap = snap_dest->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) snap_dest->complete = snap_src->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) snap_src->complete = u.table_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) u.store_swap = snap_dest->store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) snap_dest->store = snap_src->store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) snap_src->store = u.store_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) snap_dest->store->snap = snap_dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) snap_src->store->snap = snap_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) snap_dest->valid = snap_src->valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * Set source invalid to ensure it receives no further I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) snap_src->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static void snapshot_dtr(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) #ifdef CONFIG_DM_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /* Check whether exception handover must be cancelled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (snap_src && snap_dest && (s == snap_src)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) down_write(&snap_dest->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) snap_dest->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) up_write(&snap_dest->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) DMERR("Cancelling snapshot handover.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (dm_target_is_snapshot_merge(ti))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) stop_merge(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* Prevent further origin writes from using this snapshot. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /* After this returns there can be no new kcopyd jobs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) unregister_snapshot(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) while (atomic_read(&s->pending_exceptions_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * Ensure instructions in mempool_exit aren't reordered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * before atomic_read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) #ifdef CONFIG_DM_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) __free_exceptions(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) mempool_exit(&s->pending_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) dm_exception_store_destroy(s->store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) bio_uninit(&s->flush_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dm_put_device(ti, s->cow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) dm_put_device(ti, s->origin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) WARN_ON(s->in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
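/*
 * Editorial note on COW throttling: account_start_copy() bumps
 * s->in_progress for every chunk copy started and account_end_copy()
 * drops it, waking sleepers once the count is back at or below
 * cow_threshold (defined earlier in this file).  wait_for_in_progress()
 * is the other half: when too many copies are in flight it puts the
 * submitting thread to sleep on in_progress_wait, optionally dropping
 * _origins_lock before sleeping.
 */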
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static void account_start_copy(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) spin_lock(&s->in_progress_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) s->in_progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) spin_unlock(&s->in_progress_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static void account_end_copy(struct dm_snapshot *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) spin_lock(&s->in_progress_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) BUG_ON(!s->in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) s->in_progress--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (likely(s->in_progress <= cow_threshold) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) unlikely(waitqueue_active(&s->in_progress_wait)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) wake_up_locked(&s->in_progress_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) spin_unlock(&s->in_progress_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (unlikely(s->in_progress > cow_threshold)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) spin_lock(&s->in_progress_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (likely(s->in_progress > cow_threshold)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) * NOTE: this throttle doesn't account for whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * the caller is servicing an IO that will trigger a COW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * so excess throttling may result for chunks not required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * to be COW'd. But if cow_threshold was reached, extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * throttling is unlikely to negatively impact performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) __add_wait_queue(&s->in_progress_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) __set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) spin_unlock(&s->in_progress_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (unlock_origins)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) remove_wait_queue(&s->in_progress_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) spin_unlock(&s->in_progress_wait.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * Flush a list of buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) static void flush_bios(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) struct bio *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) while (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) n = bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) bio = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * Re-issue deferred origin bios: pass each back through do_origin() and submit those that map straight to the origin device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct bio *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) while (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) n = bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) r = do_origin(s->origin, bio, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (r == DM_MAPIO_REMAPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) bio = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * Error a list of buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static void error_bios(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct bio *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) while (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) n = bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) bio_io_error(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) bio = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) static void __invalidate_snapshot(struct dm_snapshot *s, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (!s->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (err == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) DMERR("Invalidating snapshot: Error reading/writing.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) else if (err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) DMERR("Invalidating snapshot: Unable to allocate exception.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (s->store->type->drop_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) s->store->type->drop_snapshot(s->store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) s->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) dm_table_event(s->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) static void invalidate_snapshot(struct dm_snapshot *s, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) __invalidate_snapshot(s, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
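/*
 * Runs once the exception store has finished committing a pending
 * exception (see complete_exception() below).  On success the completed
 * exception is inserted into the table so that subsequent snapshot reads
 * of this chunk are redirected to the COW device; on failure the snapshot
 * is invalidated.  Either way, the bios queued against the pending
 * exception are then released or errored.
 */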
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static void pending_complete(void *context, int success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct dm_snap_pending_exception *pe = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct dm_exception *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct dm_snapshot *s = pe->snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct bio *origin_bios = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct bio *snapshot_bios = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct bio *full_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) struct dm_exception_table_lock lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (!success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) /* Read/write error - snapshot is unusable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) invalidate_snapshot(s, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) e = alloc_completed_exception(GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) invalidate_snapshot(s, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) *e = pe->e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) down_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (!s->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) up_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) free_completed_exception(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * Add a proper exception. After inserting the completed exception all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) * subsequent snapshot reads to this chunk will be redirected to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * COW device. This ensures that we do not starve. Moreover, as long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * as the pending exception exists, neither origin writes nor snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * merging can overwrite the chunk in origin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) dm_insert_exception(&s->complete, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) up_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* Wait for conflicting reads to drain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (__chunk_is_tracked(s, pe->e.old_chunk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) __check_for_conflicting_io(s, pe->e.old_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* Remove the in-flight exception from the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) dm_remove_exception(&pe->e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) snapshot_bios = bio_list_get(&pe->snapshot_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) origin_bios = bio_list_get(&pe->origin_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) full_bio = pe->full_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (full_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) full_bio->bi_end_io = pe->full_bio_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) increment_pending_exceptions_done_count();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) /* Submit any pending write bios */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (full_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) bio_io_error(full_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) error_bios(snapshot_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (full_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) bio_endio(full_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) flush_bios(snapshot_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) retry_origin_bios(s, origin_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) free_pending_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static void complete_exception(struct dm_snap_pending_exception *pe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct dm_snapshot *s = pe->snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /* Update the metadata if we are persistent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) pending_complete, pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * Called when the copy I/O has finished. kcopyd actually runs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * this code so don't block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static void copy_callback(int read_err, unsigned long write_err, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) struct dm_snap_pending_exception *pe = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) struct dm_snapshot *s = pe->snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) pe->copy_error = read_err || write_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
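	/*
	 * Exceptions must be committed in the order their sequence numbers
	 * were allocated.  If this completion is the next one expected,
	 * commit it and then drain any parked completions that are now in
	 * sequence; otherwise park it in the out-of-order rb-tree, keyed by
	 * sequence number, until its turn comes.
	 */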
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (pe->exception_sequence == s->exception_complete_sequence) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) s->exception_complete_sequence++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) complete_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) next = rb_first(&s->out_of_order_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) pe = rb_entry(next, struct dm_snap_pending_exception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) out_of_order_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (pe->exception_sequence != s->exception_complete_sequence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) next = rb_next(next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) s->exception_complete_sequence++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) complete_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) struct rb_node **p = &s->out_of_order_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) struct dm_snap_pending_exception *pe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) BUG_ON(pe->exception_sequence == pe2->exception_sequence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (pe->exception_sequence < pe2->exception_sequence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) p = &((*p)->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) p = &((*p)->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) rb_link_node(&pe->out_of_order_node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) account_end_copy(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * Dispatches the copy operation to kcopyd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static void start_copy(struct dm_snap_pending_exception *pe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) struct dm_snapshot *s = pe->snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct dm_io_region src, dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct block_device *bdev = s->origin->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) sector_t dev_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) dev_size = get_dev_size(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) src.bdev = bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) dest.bdev = s->cow->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) dest.count = src.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* Hand over to kcopyd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) account_start_copy(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) static void full_bio_end_io(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) void *callback_data = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
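/*
 * The bio overwrites a whole chunk, so the copy from the origin can be
 * skipped: the bio (already remapped to the COW device by the caller) is
 * submitted directly, and its completion is fed into the kcopyd callback
 * machinery so that copy_callback()/pending_complete() run exactly as if
 * kcopyd had performed the copy.
 */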
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static void start_full_bio(struct dm_snap_pending_exception *pe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct dm_snapshot *s = pe->snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) void *callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) pe->full_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) pe->full_bio_end_io = bio->bi_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) account_start_copy(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) copy_callback, pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) bio->bi_end_io = full_bio_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) bio->bi_private = callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
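/*
 * Look up the pending exception covering @chunk, or return NULL if there
 * is none.  The caller must hold the chunk's pending exception table slot
 * lock.
 */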
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static struct dm_snap_pending_exception *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return container_of(e, struct dm_snap_pending_exception, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * Inserts a pending exception into the pending table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * NOTE: a write lock must be held on the chunk's pending exception table slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * before calling this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static struct dm_snap_pending_exception *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) __insert_pending_exception(struct dm_snapshot *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct dm_snap_pending_exception *pe, chunk_t chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) pe->e.old_chunk = chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) bio_list_init(&pe->origin_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) bio_list_init(&pe->snapshot_bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) pe->started = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) pe->full_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
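	/*
	 * The chunk is allocated in the exception store and the sequence
	 * number is assigned under pe_allocation_lock, keeping the sequence
	 * numbering consistent with the order of store allocations; completed
	 * exceptions are committed in this order (see copy_callback()).
	 */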
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) spin_lock(&s->pe_allocation_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (s->store->type->prepare_exception(s->store, &pe->e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) spin_unlock(&s->pe_allocation_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) free_pending_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) pe->exception_sequence = s->exception_start_sequence++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) spin_unlock(&s->pe_allocation_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) dm_insert_exception(&s->pending, &pe->e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * Looks to see if this snapshot already has a pending exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * for this chunk, otherwise it allocates a new one and inserts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * it into the pending table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * NOTE: a write lock must be held on the chunk's pending exception table slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * before calling this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) static struct dm_snap_pending_exception *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) __find_pending_exception(struct dm_snapshot *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) struct dm_snap_pending_exception *pe, chunk_t chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct dm_snap_pending_exception *pe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) pe2 = __lookup_pending_exception(s, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (pe2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) free_pending_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) return pe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return __insert_pending_exception(s, pe, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
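/*
 * Redirect a bio onto the COW device: map the chunk through the exception
 * (exceptions can describe ranges of consecutive chunks, hence the
 * chunk - old_chunk adjustment) while preserving the bio's offset within
 * the chunk.
 */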
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct bio *bio, chunk_t chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) bio_set_dev(bio, s->cow->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) bio->bi_iter.bi_sector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) (chunk - e->old_chunk)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) (bio->bi_iter.bi_sector & s->store->chunk_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static void zero_callback(int read_err, unsigned long write_err, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct bio *bio = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) struct dm_snapshot *s = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) account_end_copy(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) bio->bi_status = write_err ? BLK_STS_IOERR : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
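/*
 * Handle a whole-chunk discard to a chunk that has already been remapped:
 * zero the corresponding chunk on the COW device via kcopyd.  Completion
 * is signalled through zero_callback() above, which ends the original bio.
 */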
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) struct bio *bio, chunk_t chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) struct dm_io_region dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) dest.bdev = s->cow->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) dest.sector = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) dest.count = s->store->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) account_start_copy(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) WARN_ON_ONCE(bio->bi_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) bio->bi_private = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
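/*
 * Return true if the bio spans exactly one whole chunk (its size in bytes
 * equals the chunk size).
 */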
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return bio->bi_iter.bi_size ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) (s->store->chunk_size << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
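/*
 * Map I/O submitted to the snapshot device.  Flushes and reads of chunks
 * that have already been remapped go to the COW device, reads of
 * unremapped chunks are passed to the origin, and writes trigger a
 * pending exception: the write is queued and only issued once the chunk
 * has been copied out.  Discards zero the COW chunk if one has been
 * allocated, complete immediately if not, and may additionally be passed
 * down to the origin when the discard_passdown_origin feature is enabled.
 */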
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static int snapshot_map(struct dm_target *ti, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) struct dm_exception *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) int r = DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) chunk_t chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) struct dm_snap_pending_exception *pe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct dm_exception_table_lock lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) init_tracked_chunk(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (bio->bi_opf & REQ_PREFLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) bio_set_dev(bio, s->cow->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) dm_exception_table_lock_init(s, chunk, &lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) /* Full snapshots are not usable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /* To get here the table must be live so s->active is always set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (!s->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (bio_data_dir(bio) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) while (unlikely(!wait_for_in_progress(s, false)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) ; /* wait_for_in_progress() has slept */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) down_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (!s->valid || (unlikely(s->snapshot_overflowed) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) bio_data_dir(bio) == WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) r = DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * passdown discard to origin (without triggering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * snapshot exceptions via do_origin; doing so would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * defeat the goal of freeing space in origin that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * implied by the "discard_passdown_origin" feature)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) bio_set_dev(bio, s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) track_chunk(s, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) /* If the block is already remapped - use that, else remap it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) e = dm_lookup_exception(&s->complete, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) remap_exception(s, e, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) io_overlaps_chunk(s, bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) up_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) zero_exception(s, e, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) r = DM_MAPIO_SUBMITTED; /* discard is not issued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * If no exception exists, complete the discard immediately;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) * otherwise it would trigger a copy-out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) r = DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * Write to snapshot - higher level takes care of RW/RO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * flags so we should only get this if we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * writeable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (bio_data_dir(bio) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) pe = __lookup_pending_exception(s, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (!pe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) pe = alloc_pending_exception(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) e = dm_lookup_exception(&s->complete, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) free_pending_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) remap_exception(s, e, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) pe = __find_pending_exception(s, pe, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (!pe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) up_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (s->store->userspace_supports_overflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (s->valid && !s->snapshot_overflowed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) s->snapshot_overflowed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) DMERR("Snapshot overflowed: Unable to allocate exception.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) __invalidate_snapshot(s, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) r = DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) remap_exception(s, &pe->e, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) r = DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (!pe->started && io_overlaps_chunk(s, bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) pe->started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) up_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) start_full_bio(pe, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) bio_list_add(&pe->snapshot_bios, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (!pe->started) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /* this is protected by the exception table lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) pe->started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) up_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) start_copy(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) } else {
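		/*
		 * Read of a not-yet-remapped chunk: send it to the origin and
		 * track it, so that a copy-out of the same chunk waits for
		 * this read to drain (see __check_for_conflicting_io()).
		 */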
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) bio_set_dev(bio, s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) track_chunk(s, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) up_read(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * A snapshot-merge target behaves like a combination of a snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * target and a snapshot-origin target. It only generates new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * exceptions in other snapshots and not in the one that is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * merged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * For each chunk, if there is an existing exception, it is used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * which in turn might generate exceptions in other snapshots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * If merging is currently taking place on the chunk in question, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * I/O is deferred by adding it to s->bios_queued_during_merge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) struct dm_exception *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) int r = DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) chunk_t chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) init_tracked_chunk(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (bio->bi_opf & REQ_PREFLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (!dm_bio_get_target_bio_nr(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) bio_set_dev(bio, s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) bio_set_dev(bio, s->cow->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /* Once merging, discards no longer effect change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) return DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /* Full merging snapshots are redirected to the origin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) if (!s->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) goto redirect_to_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) /* If the block is already remapped - use that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) e = dm_lookup_exception(&s->complete, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /* Queue writes overlapping with chunks being merged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (bio_data_dir(bio) == WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) chunk >= s->first_merging_chunk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) chunk < (s->first_merging_chunk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) s->num_merging_chunks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) bio_set_dev(bio, s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) bio_list_add(&s->bios_queued_during_merge, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) r = DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) remap_exception(s, e, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (bio_data_dir(bio) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) track_chunk(s, bio, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) redirect_to_origin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) bio_set_dev(bio, s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (bio_data_dir(bio) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return do_origin(s->origin, bio, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) blk_status_t *error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (is_bio_tracked(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) stop_tracking_chunk(s, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) return DM_ENDIO_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) static void snapshot_merge_presuspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) stop_merge(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
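/*
 * Before resuming, check whether an exception handover is pending on a
 * shared COW device.  The handover destination may only resume once the
 * source target has been suspended, and the source itself must not be
 * resumed until the handover has completed.
 */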
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) static int snapshot_preresume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (snap_src && snap_dest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) down_read(&snap_src->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if (s == snap_src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) DMERR("Unable to resume snapshot source until handover completes.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) } else if (!dm_suspended(snap_src->ti)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) DMERR("Unable to perform snapshot handover until source is suspended.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) up_read(&snap_src->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
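/*
 * If another snapshot target is waiting to hand its exceptions over to
 * this one (typically across a table reload), briefly suspend the origin
 * device (or the merging snapshot that shares the COW device), stopping
 * and restarting any in-progress merge around the handover, then mark the
 * snapshot active.
 */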
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) static void snapshot_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) struct dm_origin *o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) struct mapped_device *origin_md = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) bool must_restart_merging = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) o = __lookup_dm_origin(s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (o)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) origin_md = dm_table_get_md(o->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (!origin_md) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (snap_merging)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) origin_md = dm_table_get_md(snap_merging->ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) if (origin_md == dm_table_get_md(ti->table))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) origin_md = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (origin_md) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (dm_hold(origin_md))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) origin_md = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (origin_md) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) dm_internal_suspend_fast(origin_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) must_restart_merging = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) stop_merge(snap_merging);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (snap_src && snap_dest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) down_write(&snap_src->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) __handover_exceptions(snap_src, snap_dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) up_write(&snap_dest->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) up_write(&snap_src->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (origin_md) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (must_restart_merging)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) start_merge(snap_merging);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) dm_internal_resume_fast(origin_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) dm_put(origin_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /* Now that we have the correct chunk size, re-register the snapshot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) reregister_snapshot(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) down_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) s->active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) up_write(&s->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) uint32_t min_chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return min_chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) static void snapshot_merge_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) struct dm_snapshot *s = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) * Handover exceptions from existing snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) snapshot_resume(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) * snapshot-merge acts as an origin, so set ti->max_io_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) start_merge(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
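/*
 * STATUSTYPE_INFO reports the exception store usage (allocated/total and
 * metadata sectors) or the snapshot's failure state; STATUSTYPE_TABLE
 * reproduces the constructor arguments, including any optional discard
 * features.
 */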
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) static void snapshot_status(struct dm_target *ti, status_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) unsigned status_flags, char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) unsigned sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct dm_snapshot *snap = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) unsigned num_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) case STATUSTYPE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) down_write(&snap->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (!snap->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) DMEMIT("Invalid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) else if (snap->merge_failed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) DMEMIT("Merge failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) else if (snap->snapshot_overflowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) DMEMIT("Overflow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (snap->store->type->usage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) sector_t total_sectors, sectors_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) metadata_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) snap->store->type->usage(snap->store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) &total_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) &sectors_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) &metadata_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) DMEMIT("%llu/%llu %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) (unsigned long long)sectors_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) (unsigned long long)total_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) (unsigned long long)metadata_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) DMEMIT("Unknown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) up_write(&snap->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) case STATUSTYPE_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) * kdevname returns a static pointer so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * to make private copies if the output is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * make sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) DMEMIT("%s %s", snap->origin->name, snap->cow->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) sz += snap->store->type->status(snap->store, type, result + sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) maxlen - sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) if (num_features) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) DMEMIT(" %u", num_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (snap->discard_zeroes_cow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) DMEMIT(" discard_zeroes_cow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (snap->discard_passdown_origin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) DMEMIT(" discard_passdown_origin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static int snapshot_iterate_devices(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) iterate_devices_callout_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) struct dm_snapshot *snap = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) r = fn(ti, snap->origin, 0, ti->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) struct dm_snapshot *snap = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (snap->discard_zeroes_cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) (void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (snap_src && snap_dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) snap = snap_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) /* All discards are split on chunk_size boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) limits->discard_granularity = snap->store->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) limits->max_discard_sectors = snap->store->chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /*-----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) * Origin methods
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) *---------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) * supplied bio was ignored. The caller may submit it immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) * (No remapping actually occurs as the origin is always a direct linear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) * map.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * and any supplied bio is added to a list to be submitted once all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * the necessary exceptions exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) static int __origin_write(struct list_head *snapshots, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) int r = DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) struct dm_snapshot *snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) struct dm_exception *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) struct dm_snap_pending_exception *pe, *pe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) struct dm_snap_pending_exception *pe_to_start_now = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) struct dm_snap_pending_exception *pe_to_start_last = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) struct dm_exception_table_lock lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) chunk_t chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) /* Do all the snapshots on this origin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) list_for_each_entry(snap, snapshots, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) * Don't make new exceptions in a merging snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) * because it has effectively been deleted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (dm_target_is_snapshot_merge(snap->ti))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) /* Nothing to do if writing beyond end of snapshot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (sector >= dm_table_get_size(snap->ti->table))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * Remember, different snapshots can have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * different chunk sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) chunk = sector_to_chunk(snap->store, sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) dm_exception_table_lock_init(snap, chunk, &lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) down_read(&snap->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) /* Only deal with valid and active snapshots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (!snap->valid || !snap->active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) goto next_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) pe = __lookup_pending_exception(snap, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (!pe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) * Check exception table to see if block is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) * remapped in this snapshot and trigger an exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) * if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) e = dm_lookup_exception(&snap->complete, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) goto next_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) pe = alloc_pending_exception(snap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) dm_exception_table_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) pe2 = __lookup_pending_exception(snap, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (!pe2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) e = dm_lookup_exception(&snap->complete, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) free_pending_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) goto next_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) pe = __insert_pending_exception(snap, pe, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (!pe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) up_read(&snap->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) invalidate_snapshot(snap, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) free_pending_exception(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) pe = pe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) r = DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * If an origin bio was supplied, queue it to wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) * completion of this exception, and start this one last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) * at the end of the function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) bio_list_add(&pe->origin_bios, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (!pe->started) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) pe->started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) pe_to_start_last = pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (!pe->started) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) pe->started = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) pe_to_start_now = pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) next_snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) dm_exception_table_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) up_read(&snap->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (pe_to_start_now) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) start_copy(pe_to_start_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) pe_to_start_now = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) * Start, last of all, the copy for the exception the origin bio is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) * queued against, so that the other exceptions get a head start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (pe_to_start_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) start_copy(pe_to_start_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * Called on a write from the origin driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) struct origin *o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) int r = DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) o = __lookup_origin(origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) if (o) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (limit) {
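/*
 * wait_for_in_progress() throttles when too many COW copies are
 * already in flight for a snapshot; if it had to sleep it drops
 * _origins_lock and returns false, so the origin lookup must be
 * redone from scratch.
 */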
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) struct dm_snapshot *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) list_for_each_entry(s, &o->snapshots, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) if (unlikely(!wait_for_in_progress(s, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * Trigger exceptions in all non-merging snapshots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * The chunk size of the merging snapshot may be larger than the chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) * size of some other snapshot so we may need to reallocate multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * chunks in other snapshots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * We scan all the overlapping exceptions in the other snapshots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) * Returns 1 if anything was reallocated and must be waited for,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * otherwise returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) * size must be a multiple of merging_snap's chunk_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) static int origin_write_extent(struct dm_snapshot *merging_snap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) sector_t sector, unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) int must_wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) sector_t n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) struct origin *o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * The origin's __minimum_chunk_size() got stored in max_io_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) * by snapshot_merge_resume().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) down_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) o = __lookup_origin(merging_snap->origin->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) for (n = 0; n < size; n += merging_snap->ti->max_io_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) if (__origin_write(&o->snapshots, sector + n, NULL) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) DM_MAPIO_SUBMITTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) must_wait = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) up_read(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return must_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
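
/*
 * Worked example (illustrative values): if the smallest chunk size
 * among the origin's snapshots is 16 sectors (so ti->max_io_len == 16)
 * and the merge writes back a 64-sector extent, the loop above calls
 * __origin_write() at sector, sector + 16, sector + 32 and sector + 48,
 * and the function returns 1 if any of those queued a pending exception
 * that must complete before the origin may be overwritten.
 */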
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) * Origin: maps a linear range of a device, with hooks for snapshotting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) * Construct an origin mapping: <dev_path>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) * The context for an origin is a 'struct dm_origin' wrapping the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) * 'struct dm_dev' that points to the real device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct dm_origin *o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (argc != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) ti->error = "origin: incorrect number of arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) if (!o) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) ti->error = "Cannot allocate private origin structure";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) goto bad_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) ti->error = "Cannot get target device";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) goto bad_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) o->ti = ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) ti->private = o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) ti->num_flush_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) bad_open:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) kfree(o);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) bad_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
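
/*
 * Example construction via dmsetup (device and table names are
 * illustrative, not taken from this file):
 *
 *   # <start> <length> snapshot-origin <dev_path>
 *   echo "0 $(blockdev --getsz /dev/vg/base) snapshot-origin /dev/vg/base" | \
 *           dmsetup create base-origin
 *
 * Reads of "base-origin" go straight to /dev/vg/base; writes do too,
 * but first trigger COW exceptions in every active snapshot of it.
 */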
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) static void origin_dtr(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) struct dm_origin *o = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) dm_put_device(ti, o->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) kfree(o);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) static int origin_map(struct dm_target *ti, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) struct dm_origin *o = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) unsigned available_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) bio_set_dev(bio, o->dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (unlikely(bio->bi_opf & REQ_PREFLUSH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) return DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) if (bio_data_dir(bio) != WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) return DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) available_sectors = o->split_boundary -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (bio_sectors(bio) > available_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) dm_accept_partial_bio(bio, available_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) /* This is a write: pass it on to the snapshots sharing this origin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) return do_origin(o->dev, bio, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
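
/*
 * Example of the split arithmetic above (hypothetical numbers):
 * split_boundary is the smallest snapshot chunk size and is a power of
 * two, say 16 sectors.  A 32-sector write starting at sector 70 gets
 * available_sectors = 16 - (70 & 15) = 10, so the bio is trimmed to 10
 * sectors here and device-mapper core resubmits the remainder, keeping
 * every write inside a single chunk of every snapshot.
 */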
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) * Set the origin "split_boundary" field to the minimum of all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * snapshots' chunk sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) static void origin_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) struct dm_origin *o = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) down_write(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) __insert_dm_origin(o);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) up_write(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
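
/*
 * For instance (illustrative sizes): with two snapshots using chunk
 * sizes of 16 and 64 sectors, get_origin_minimum_chunksize() yields 16,
 * so origin_map() never lets a write cross a 16-sector boundary and
 * hence never lets one cross a chunk boundary of either snapshot.
 */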
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) static void origin_postsuspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct dm_origin *o = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) down_write(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) __remove_dm_origin(o);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) up_write(&_origins_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) static void origin_status(struct dm_target *ti, status_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) unsigned status_flags, char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct dm_origin *o = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) case STATUSTYPE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) result[0] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) case STATUSTYPE_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) snprintf(result, maxlen, "%s", o->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) static int origin_iterate_devices(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) iterate_devices_callout_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) struct dm_origin *o = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) return fn(ti, o->dev, 0, ti->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) static struct target_type origin_target = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) .name = "snapshot-origin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) .version = {1, 9, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) .ctr = origin_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) .dtr = origin_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) .map = origin_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) .resume = origin_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) .postsuspend = origin_postsuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) .status = origin_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) .iterate_devices = origin_iterate_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) static struct target_type snapshot_target = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) .name = "snapshot",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) .version = {1, 16, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) .ctr = snapshot_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) .dtr = snapshot_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) .map = snapshot_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) .end_io = snapshot_end_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) .preresume = snapshot_preresume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) .resume = snapshot_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) .status = snapshot_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) .iterate_devices = snapshot_iterate_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) .io_hints = snapshot_io_hints,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) static struct target_type merge_target = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) .name = dm_snapshot_merge_target_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) .version = {1, 5, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) .ctr = snapshot_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) .dtr = snapshot_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) .map = snapshot_merge_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) .end_io = snapshot_end_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) .presuspend = snapshot_merge_presuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) .preresume = snapshot_preresume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) .resume = snapshot_merge_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) .status = snapshot_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) .iterate_devices = snapshot_iterate_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) .io_hints = snapshot_io_hints,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) static int __init dm_snapshot_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) r = dm_exception_store_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) DMERR("Failed to initialize exception stores");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) r = init_origin_hash();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) DMERR("init_origin_hash failed.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) goto bad_origin_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) exception_cache = KMEM_CACHE(dm_exception, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (!exception_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) DMERR("Couldn't create exception cache.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) goto bad_exception_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (!pending_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) DMERR("Couldn't create pending cache.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) goto bad_pending_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) r = dm_register_target(&snapshot_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) DMERR("Snapshot target register failed %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) goto bad_register_snapshot_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) r = dm_register_target(&origin_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) DMERR("Origin target register failed %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) goto bad_register_origin_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) r = dm_register_target(&merge_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) DMERR("Merge target register failed %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) goto bad_register_merge_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) bad_register_merge_target:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) dm_unregister_target(&origin_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) bad_register_origin_target:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) dm_unregister_target(&snapshot_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) bad_register_snapshot_target:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) kmem_cache_destroy(pending_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) bad_pending_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) kmem_cache_destroy(exception_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) bad_exception_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) exit_origin_hash();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) bad_origin_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) dm_exception_store_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) static void __exit dm_snapshot_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) dm_unregister_target(&snapshot_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) dm_unregister_target(&origin_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) dm_unregister_target(&merge_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) exit_origin_hash();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) kmem_cache_destroy(pending_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) kmem_cache_destroy(exception_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) dm_exception_store_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) /* Module hooks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) module_init(dm_snapshot_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) module_exit(dm_snapshot_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) MODULE_DESCRIPTION(DM_NAME " snapshot target");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) MODULE_AUTHOR("Joe Thornber");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) MODULE_ALIAS("dm-snapshot-origin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) MODULE_ALIAS("dm-snapshot-merge");
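
/*
 * The MODULE_ALIAS() lines above allow autoloading: when a table uses
 * the "snapshot-origin" or "snapshot-merge" target and it is not yet
 * registered, device-mapper core issues request_module("dm-<target name>"),
 * which these aliases map to this module.  The plain "snapshot" target
 * needs no alias because it matches the module name dm-snapshot.
 */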