/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define DM_MSG_PREFIX	"region hash"
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions. Each
 * region can be in one of three states: clean, dirty,
 * nosync. There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table, a region _may_
 * be present on one of three lists.
 *
 * clean_regions: Regions on this list have no io pending to
 * them, they are in sync, we are no longer interested in them,
 * they are dull. dm_rh_update_states() will remove them from the
 * hash table.
 *
 * quiesced_regions: These regions have been spun down, ready
 * for recovery. dm_rh_recovery_start() will remove regions from
 * this list and hand them to kmirrord, which will schedule the
 * recovery io with kcopyd.
 *
 * recovered_regions: Regions that kcopyd has successfully
 * recovered. dm_rh_update_states() will now schedule any delayed
 * io, up the recovery_count, and remove the region from the
 * hash.
 *
 * There are 2 locks:
 * A rw spin lock 'hash_lock' protects just the hash table,
 * this is never held in write mode from interrupt context,
 * which I believe means that we only have to disable irqs when
 * doing a write lock.
 *
 * An ordinary spin lock 'region_lock' that protects the three
 * lists in the region_hash, together with the 'state', 'list' and
 * 'delayed_bios' fields of the regions. This is used from irq
 * context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
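
/*
 * A compact sketch of the region life cycle implied by the code below
 * (an illustrative summary, not an exhaustive state machine):
 *
 *	DM_RH_CLEAN --write (rh_inc)--> DM_RH_DIRTY
 *	DM_RH_DIRTY --last pending write done (dm_rh_dec)--> DM_RH_CLEAN
 *	any state --resync work chosen--> DM_RH_RECOVERING
 *	DM_RH_RECOVERING --recovery ok (dm_rh_recovery_end)--> recovered_regions
 *	DM_RH_RECOVERING --recovery failed--> failed_recovered_regions
 *	write failure on a leg (dm_rh_mark_nosync) --> DM_RH_NOSYNC
 */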
struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	/*
	 * If there was a flush failure no regions can be marked clean.
	 */
	int flush_failure;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
	struct semaphore recovery_count;

	mempool_t region_pool;

	void *context;
	sector_t target_begin;
	/* Callback function to schedule bio writes */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wake up the caller's worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wake up the caller's recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
				      rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
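
/*
 * Worked example for the conversion helpers above, assuming the
 * power-of-two region_size that dm_region_hash_create() relies on
 * (region_shift is derived with __ffs()):
 *
 *	region_size = 1024 sectors  =>  region_shift = 10
 *	sector 5000 >> 10           =>  region 4
 *	region 4 << 10              =>  sector 4096 (region's first sector)
 *
 * dm_rh_bio_to_region() subtracts target_begin first, so region numbers
 * are relative to the start of the target rather than the device.
 */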

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()?
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;
	int ret;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kzalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = __ffs(region_size);
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	ret = mempool_init_kmalloc_pool(&rh->region_pool, MIN_REGIONS,
					sizeof(struct dm_region));
	if (ret) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
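
/*
 * Minimal creation sketch for a hypothetical caller (the mirror target
 * in dm-raid1.c is the real client; 'ms', the 'my_*' callbacks and
 * MAX_RECOVERY below are illustrative names only):
 *
 *	rh = dm_region_hash_create(ms, my_dispatch_bios, my_wakeup_workers,
 *				   my_wakeup_recovery_waiters, ti->begin,
 *				   MAX_RECOVERY, log, region_size,
 *				   nr_regions);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 *
 * On success the region hash owns the dirty log: dm_region_hash_destroy()
 * will call dm_dirty_log_destroy() on it.
 */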

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, &rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	mempool_exit(&rh->region_pool);
	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);

/* Multiplicative hash: scale the key by a large prime and fold into the mask. */
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(&rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, &rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}

static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (e.g. -EWOULDBLOCK) gets
	 * taken as a DM_RH_NOSYNC.
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed. If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_opf & REQ_PREFLUSH) {
		rh->flush_failure = 1;
		return;
	}

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In all cases, the region should not be on any list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);

void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, &rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
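
/*
 * Sketch of a worker loop driving the state machine (hypothetical, but
 * modelled on how a mirror worker would use this API):
 *
 *	dm_rh_update_states(rh, errors_handled); // retire finished regions
 *	dm_rh_recovery_prepare(rh);              // quiesce new resync work
 *	while ((reg = dm_rh_recovery_start(rh)))
 *		... submit recovery io for reg, e.g. via kcopyd ...
 *	dm_rh_inc_pending(rh, &writes);          // then dispatch the writes
 *
 * Calling dm_rh_update_states() first ensures regions recovered since
 * the last pass are retired before more work is queued.
 */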

static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for
		 * the next action.  At this point, the region is not
		 * yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be
		 * kept off the clean list.  The hash entry for
		 * DM_RH_NOSYNC will remain in memory until the region
		 * is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);
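
/*
 * The pending count must be balanced by the caller: one rh_inc per
 * write bio (via dm_rh_inc_pending()) before issue, and one dm_rh_dec()
 * when that write completes.  A hedged sketch:
 *
 *	dm_rh_inc_pending(rh, &writes);	// marks regions dirty in the log
 *	... issue the write bios ...
 *	// later, from the write's completion path:
 *	dm_rh_dec(rh, region);
 *
 * dm_rh_dec() takes a region number rather than a bio, so callers can
 * record the region up front and account for it after the bio is gone.
 */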

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Return a quiesced region, or NULL if none is currently available.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);

	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
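
/*
 * The full recovery cycle, pieced together (a sketch; the caller's
 * worker thread is expected to drive it, and the completion-callback
 * arguments shown are illustrative):
 *
 *	dm_rh_recovery_prepare(rh);	// quiesce up to max_recovery regions
 *	while ((reg = dm_rh_recovery_start(rh)))
 *		... start a copy for reg, e.g. with kcopyd ...
 *	// from the copy's completion callback:
 *	dm_rh_recovery_end(reg, !read_err && !write_err);
 *	// the worker then retires the region on its next pass:
 *	dm_rh_update_states(rh, errors_handled);
 */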

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	unsigned i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	unsigned i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");