/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */

#define DM_PREFETCH_CHUNKS 12
/*
 *---------------------------------------------------------------
 * Persistent snapshots; by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------
 */

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device. The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store. It makes sense, therefore, to store the
 * metadata in chunk-sized blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots with a different disk version than the kernel will
 * not be usable. It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as a single metadata
 * area can describe; this pattern then repeats.
 *
 * All on-disk structures are in little-endian format. The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header. The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'area' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free. It holds the next chunk to be allocated. On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free. It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation. Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early, this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
	vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

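/*
 * Work function executed on ps->metadata_wq: it issues the prepared
 * dm_io request synchronously and records the result so that
 * chunk_io() can return it to the caller.
 */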
static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
		    int op_flags, int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_op = op,
		.bi_op_flags = op_flags,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid submit_bio_noacct recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

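/*
 * Metadata chunks therefore live at a fixed stride.  For example, with
 * the default 16KB chunk a metadata area holds
 * 16384 / sizeof(struct disk_exception) = 1024 entries, so metadata
 * sits in chunks 1, 1026, 2051, ... - i.e. whenever
 * chunk % (exceptions_per_area + 1) == NUM_SNAPSHOT_HDR_CHUNKS.
 * skip_metadata() bumps ps->next_free past such a chunk so that
 * exception data is never allocated on top of a metadata area.
 */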
static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int op, int op_flags)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area),
			REQ_OP_WRITE, 0, 0);
}

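/*
 * Read the header chunk and sanity-check it.  On success, *new_snapshot
 * is set to 1 if the device is freshly zeroed (no header yet) and to 0
 * if an existing header was found.  If the on-disk chunk size differs
 * from the one in the table, the on-disk value wins and the in-memory
 * areas are reallocated to match it.
 */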
static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned int chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none was supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * completely filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions. Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Setup for one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

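		/*
		 * Prefetch up to DM_PREFETCH_CHUNKS metadata areas ahead so
		 * they are already in the bufio cache when the reads below
		 * reach them.  The 'if (DM_PREFETCH_CHUNKS)' lets the
		 * compiler drop the whole block if prefetching is configured
		 * out, and the '!prefetch_area' test stops the loop on
		 * chunk_t wrap-around.
		 */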
		if (DM_PREFETCH_CHUNKS) do {
			chunk_t pf_chunk = area_location(ps, prefetch_area);

			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
				break;
			dm_bufio_prefetch(client, pf_chunk, 1);
			prefetch_area++;
			if (unlikely(!prefetch_area))
				break;
		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (IS_ERR(area)) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	vfree(ps->callbacks);

	kfree(ps);
}

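/*
 * Read the on-disk header and register every committed exception with
 * the in-core snapshot via 'callback'.  Note that with the default 16KB
 * chunk size each metadata area holds
 * 16384 / sizeof(struct disk_exception) = 1024 exceptions, which is
 * what sizes the callbacks array below.
 */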
static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, new_snapshot;
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * The metadata is valid, but the snapshot itself was invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

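/*
 * Write the exception into the in-core copy of the current metadata
 * area and queue the completion callback.  The area is only flushed to
 * disk (REQ_PREFLUSH | REQ_FUA) once all pending exceptions have been
 * committed or the area is full, so callbacks are batched and run
 * together after the flush.
 */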
static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback)(void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array. This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, REQ_OP_WRITE,
				 REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

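/*
 * Work out how many of the most recently committed exceptions can be
 * merged back in one go.  Returns 0 when there is nothing left to
 * merge, a negative errno on I/O failure, or the number of consecutive
 * exceptions (both old and new chunks descending by one) ending at the
 * last committed entry.
 */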
static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, REQ_OP_READ, 0);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

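/*
 * Constructor: allocate and initialise the pstore. The only supported
 * option is a trailing 'O' on the store type (e.g. a table line such
 * as "0 1024 snapshot /dev/base /dev/cow PO 8" -- devices here are
 * illustrative), which declares that userspace understands the
 * snapshot overflow state.
 */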
static int persistent_ctr(struct dm_exception_store *store, char *options)
{
	struct pstore *ps;
	int r;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		DMERR("couldn't start header metadata update thread");
		r = -ENOMEM;
		goto err_workqueue;
	}

	if (options) {
		char overflow = toupper(options[0]);

		if (overflow == 'O') {
			store->userspace_supports_overflow = true;
		} else {
			DMERR("Unsupported persistent store option: %s", options);
			r = -EINVAL;
			goto err_options;
		}
	}

	store->context = ps;

	return 0;

err_options:
	destroy_workqueue(ps->metadata_wq);
err_workqueue:
	kfree(ps);

	return r;
}

static unsigned int persistent_status(struct dm_exception_store *store,
				      status_type_t status, char *result,
				      unsigned int maxlen)
{
	unsigned int sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
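		/*
		 * Example: with overflow support and a 4KiB chunk
		 * size this emits " PO 8" (chunk_size is expressed
		 * in 512-byte sectors).
		 */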
		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
		       (unsigned long long)store->chunk_size);
		break;
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

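/*
 * Old-style single-letter name ("P") kept so tables written for
 * earlier kernels keep working; the callbacks are identical to the
 * "persistent" type above.
 */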
static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

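/*
 * Register both spellings of the store type; if the compat
 * registration fails, roll back the first so the module leaves no
 * half-registered state behind.
 */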
int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return 0;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}