// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"

/*
 * metadata/data are stored on disk in 4k-sized units (blocks) regardless
 * of the underlying hardware sector size. This only works with
 * PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)
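
/*
 * For illustration, with standard 512-byte sectors:
 *
 *	1 block = BLOCK_SECTORS * 512 = 8 * 512 = 4096 bytes
 *	block number = sector >> BLOCK_SECTOR_SHIFT (i.e. sector / 8)
 */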

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, reclaim runs whenever reclaimable space reaches
 * log->max_free_space. This keeps recovery from scanning too much of the
 * log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
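
/*
 * Worked out, RECLAIM_MAX_FREE_SPACE is 10 * 1024 * 1024 * 2 = 20971520
 * sectors of 512 bytes = 10GiB, and the SHIFT of 2 yields the 1/4 of the
 * device size (device_size >> 2) used in the min() above.
 */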

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but keep a few
 * more available so we don't run too tight.
 */
#define R5L_POOL_SIZE 4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs if free space drops
					 * to this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	mempool_t meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. If it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e., reclaim
					 * doesn't wait for a specific io_unit
					 * to switch to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write-back in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in write-back mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These data are tracked in a radix tree
 * (big_stripe_tree). We use radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This look up is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be
 * 2b'00, so the counter is left-shifted by 2 bits before being used as
 * the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
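
/*
 * For illustration, a count of 3 round-trips through the tree as:
 *
 *	void *item = (void *)(3UL << R5C_RADIX_COUNT_SHIFT);
 *	unsigned long count = (unsigned long)item >> R5C_RADIX_COUNT_SHIFT;
 */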

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_div(sect, conf->chunk_sectors);
	return sect;
}
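
/*
 * For example, with 256kB chunks (chunk_sectors == 512), sectors 0..511
 * all map to key 0 and sector 512 maps to key 1, so every stripe within
 * one chunk shares a single big_stripe counter in the tree.
 */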

/*
 * An IO range starts from a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. The io_unit is written to the log disk with a normal write; as we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no requirement to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;		/* include flush request */
	unsigned int has_fua:1;			/* include fua request */
	unsigned int has_null_flush:1;		/* include null flush request */
	unsigned int has_flush_payload:1;	/* include flush payload */
	/*
	 * io isn't sent yet; a flush/fua request can only be submitted
	 * once it becomes the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * no longer accepting new bios */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
};
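
/*
 * An io_unit only ever moves forward through these states:
 *
 *	IO_UNIT_RUNNING -> IO_UNIT_IO_START -> IO_UNIT_IO_END
 *					    -> IO_UNIT_STRIPE_END
 *
 * __r5l_set_io_unit_state() below warns on any attempt to move backward.
 */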

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
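
/*
 * The log is a ring over [0, device_size). For example, with
 * device_size == 1024 sectors:
 *
 *	r5l_ring_add(log, 1016, 16)     == 8	(wraps past the end)
 *	r5l_ring_distance(log, 1016, 8) == 16	(distance across the wrap)
 *
 * Note that r5l_ring_add() assumes inc < device_size, so a single
 * subtraction is enough to handle the wrap.
 */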

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + RAID5_STRIPE_SECTORS(conf)) {
		wbi2 = r5_next_bio(conf, wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
			md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					   RAID5_STRIPE_SECTORS(conf),
					   !test_bit(STRIPE_DEGRADED, &sh->state),
					   0);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;

	if (!r5c_is_writeback(conf->log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 *   - stripe cache pressure high:
	 *          total_cached > 3/4 min_nr_stripes ||
	 *          empty_inactive_list_nr > 0
	 *   - stripe cache pressure moderate:
	 *          total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(conf->log, 0);
}
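
/*
 * For example, with a hypothetical min_nr_stripes of 256, reclaim is
 * woken once more than 128 stripes are cached, or as soon as any
 * inactive list runs empty.
 */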

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim when there are R5C_FULL_STRIPE_FLUSH_BATCH cached
	 * full stripes, or enough full stripes to fill a chunk
	 * (chunk_sectors / stripe-size stripes), whichever is smaller.
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
		r5l_wake_reclaim(conf->log, 0);
}
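
/*
 * With the 256kB chunk from the example above (512 sectors, i.e. 64 4kB
 * stripes per chunk) and a hypothetical max_nr_stripes of 256, this
 * fires at min(64, 64) == 64 cached full stripes.
 */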

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes occupying log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flush higher priorities:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For a stripe that has not passed 1, flushing it
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So the total journal
 * space required to flush all cached stripes (in pages) is:
 *
 *    (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks + 1)
 * or
 *    (stripe_in_journal_count) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
	       ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		(conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
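
/*
 * Worked example for a hypothetical 6-disk RAID6 array (max_degraded == 2,
 * so raid_disks - max_degraded == 4) with group_cnt == 0 and 100 stripes
 * in the journal:
 *
 *	8 * (3 * 100 + 4 * 1) = 8 * 304 = 2432 sectors (~1.2MB)
 */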

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}
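
/*
 * Continuing the example above: with reclaim_space == 2432 sectors the
 * log is CRITICAL below 4864 free sectors and TIGHT below 7296, and
 * reclaim is kicked when the log leaves the CRITICAL state.
 */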

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Set proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity are in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;
	bool has_null_flush;
	bool has_flush_payload;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, &log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);

	/*
	 * if the io doesn't have null_flush or a flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
	 */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;

	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			if (atomic_dec_and_test(&io->pending_stripe)) {
				__r5l_stripe_write_finished(io);
				return;
			}
		}
	}
	/* decrease pending_stripe for flush payload */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failure, submit_bio will get an error
	 * and call endio; active stripes will then continue the write
	 * process. Therefore, it is not necessary to check the Faulty bit
	 * of the journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is null, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit is freed. We submit
	 * split_bio first to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int locked = 0;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait for superblock change before suspending */
	wait_event(mddev->sb_wait,
		   conf->log == NULL ||
		   (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
		    (locked = mddev_trylock(mddev))));
	if (locked) {
		mddev_suspend(mddev);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device, start from the beginning again;
	 * this will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct r5l_io_unit *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct r5l_meta_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (!io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) memset(io, 0, sizeof(*io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) io->log = log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) INIT_LIST_HEAD(&io->log_sibling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) INIT_LIST_HEAD(&io->stripe_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) bio_list_init(&io->flush_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) io->state = IO_UNIT_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) block = page_address(io->meta_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) clear_page(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) block->magic = cpu_to_le32(R5LOG_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) block->version = R5LOG_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) block->seq = cpu_to_le64(log->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) block->position = cpu_to_le64(log->log_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) io->log_start = log->log_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) io->meta_offset = sizeof(struct r5l_meta_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) io->seq = log->seq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) io->current_bio = r5l_bio_alloc(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) io->current_bio->bi_end_io = r5l_log_endio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) io->current_bio->bi_private = io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) r5_reserve_log_entry(log, io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) spin_lock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) list_add_tail(&io->log_sibling, &log->running_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) spin_unlock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
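/*
 * Make sure log->current_io can take @payload_size more bytes of meta
 * data: submit the current io_unit if it is full and start a new one.
 * Returns -ENOMEM if a new io_unit cannot be allocated.
 */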
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (log->current_io &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) log->current_io->meta_offset + payload_size > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) r5l_submit_current_io(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (!log->current_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) log->current_io = r5l_new_meta(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (!log->current_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
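/*
 * Append a data/parity payload descriptor to the current meta page.
 * The descriptor carries one checksum for a single page, or two when
 * @checksum2_valid is set (the P and Q pages of a RAID6 parity
 * payload).
 */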
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) sector_t location,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u32 checksum1, u32 checksum2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) bool checksum2_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct r5l_io_unit *io = log->current_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct r5l_payload_data_parity *payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) payload = page_address(io->meta_page) + io->meta_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) payload->header.type = cpu_to_le16(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) payload->header.flags = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) (PAGE_SHIFT - 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) payload->location = cpu_to_le64(location);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) payload->checksum[0] = cpu_to_le32(checksum1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (checksum2_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) payload->checksum[1] = cpu_to_le32(checksum2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) io->meta_offset += sizeof(struct r5l_payload_data_parity) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) sizeof(__le32) * (1 + !!checksum2_valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
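/*
 * Add one data/parity page to the current bio. If the previous page
 * wrapped to the start of the log, chain a fresh bio so that each bio
 * stays physically contiguous on the journal device.
 */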
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct r5l_io_unit *io = log->current_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (io->need_split_bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) BUG_ON(io->split_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) io->split_bio = io->current_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) io->current_bio = r5l_bio_alloc(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) bio_chain(io->current_bio, io->split_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) io->need_split_bio = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) r5_reserve_log_entry(log, io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
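/*
 * Append a flush payload recording that the stripe at @sect has been
 * written back to the raid disks, so recovery knows its journal data
 * is no longer needed (write-back mode).
 */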
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct r5l_io_unit *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct r5l_payload_flush *payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) int meta_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
	/*
	 * A flush payload requires extra writes to the journal.
	 * To avoid handling that extra IO in quiesce, just skip
	 * the flush payload.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (conf->quiesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) mutex_lock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (r5l_get_meta(log, meta_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* current implementation is one stripe per flush payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) io = log->current_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) payload = page_address(io->meta_page) + io->meta_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) payload->header.flags = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) payload->size = cpu_to_le32(sizeof(__le64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) payload->flush_stripes[0] = cpu_to_le64(sect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) io->meta_offset += meta_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* multiple flush payloads count as one pending_stripe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!io->has_flush_payload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) io->has_flush_payload = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) atomic_inc(&io->pending_stripe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
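/*
 * Append one stripe to the journal: one payload descriptor plus page
 * per written data block, followed by a single parity payload carrying
 * one (RAID5) or two (RAID6) parity pages. meta_size is the total room
 * the descriptors need in the meta page.
 */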
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int data_pages, int parity_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) int meta_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct r5l_io_unit *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) meta_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * data_pages) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) sizeof(struct r5l_payload_data_parity) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) sizeof(__le32) * parity_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ret = r5l_get_meta(log, meta_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) io = log->current_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) io->has_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (i = 0; i < sh->disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) test_bit(R5_InJournal, &sh->dev[i].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (i == sh->pd_idx || i == sh->qd_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) io->has_fua = 1;
			/*
			 * we need to flush the journal to make sure recovery
			 * can reach the data written with the FUA flag
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) io->has_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) raid5_compute_blocknr(sh, i, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) sh->dev[i].log_checksum, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) r5l_append_payload_page(log, sh->dev[i].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (parity_pages == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) sh->sector, sh->dev[sh->pd_idx].log_checksum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) sh->dev[sh->qd_idx].log_checksum, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) } else if (parity_pages == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) sh->sector, sh->dev[sh->pd_idx].log_checksum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else /* Just writing data, not parity, in caching phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) BUG_ON(parity_pages != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) list_add_tail(&sh->log_list, &io->stripe_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) atomic_inc(&io->pending_stripe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) sh->log_io = io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (sh->log_start == MaxSector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) BUG_ON(!list_empty(&sh->r5c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) sh->log_start = io->log_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) spin_lock_irq(&log->stripe_in_journal_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) list_add_tail(&sh->r5c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) &log->stripe_in_journal_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) spin_unlock_irq(&log->stripe_in_journal_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) atomic_inc(&log->stripe_in_journal_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* add stripe to no_space_stripes, and then wake up reclaim */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static inline void r5l_add_no_space_stripe(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) spin_lock(&log->no_space_stripes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) list_add_tail(&sh->log_list, &log->no_space_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) spin_unlock(&log->no_space_stripes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
/*
 * This runs in raid5d, and reclaim may in turn wait for raid5d (when it
 * flushes data from the log to the raid disks), so we must not wait for
 * reclaim here or we could deadlock.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct r5conf *conf = sh->raid_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) int write_disks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) int data_pages, parity_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) bool wake_reclaim = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Don't support stripe batch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to the log; start writing it to raid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) for (i = 0; i < sh->disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) test_bit(R5_InJournal, &sh->dev[i].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) write_disks++;
		/* the checksum was already calculated in the last run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) addr = kmap_atomic(sh->dev[i].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) addr, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) kunmap_atomic(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
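	/* P is always written here; Q exists only for RAID6 (qd_idx >= 0) */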
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) parity_pages = 1 + !!(sh->qd_idx >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) data_pages = write_disks - parity_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) set_bit(STRIPE_LOG_TRAPPED, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * The stripe must enter state machine again to finish the write, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * don't delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) clear_bit(STRIPE_DELAYED, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) atomic_inc(&sh->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) mutex_lock(&log->io_mutex);
	/* meta page + one page per written disk, converted to sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (!r5l_has_free_space(log, reserve)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) r5l_add_no_space_stripe(log, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) wake_reclaim = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) spin_lock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) list_add_tail(&sh->log_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) &log->no_mem_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) spin_unlock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * Log space is critical: do not process stripes that are
		 * not in the cache yet (sh->log_start == MaxSector).
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) sh->log_start == MaxSector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) r5l_add_no_space_stripe(log, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) wake_reclaim = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) reserve = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) } else if (!r5l_has_free_space(log, reserve)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (sh->log_start == log->last_checkpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) r5l_add_no_space_stripe(log, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) spin_lock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) list_add_tail(&sh->log_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) &log->no_mem_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_unlock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (wake_reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) r5l_wake_reclaim(log, reserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
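/* Submit the accumulated io_unit, if any, to the journal device. */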
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) void r5l_write_stripe_run(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) mutex_lock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) r5l_submit_current_io(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
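/*
 * Handle a REQ_PREFLUSH bio from the upper layer. Returns 0 when the
 * bio has been handled here (completed, or queued behind a journal
 * flush); returns -EAGAIN when the caller must process the (possibly
 * modified) bio itself.
 */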
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		/*
		 * In write-through (journal only) mode we flush the log
		 * disk cache first, then write stripe data to the raid
		 * disks. So once a bio is finished, the log disk cache has
		 * already been flushed. Recovery guarantees we can recover
		 * the bio from the log disk, so we don't need to flush
		 * again.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (bio->bi_iter.bi_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) bio->bi_opf &= ~REQ_PREFLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* write back (with cache) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (bio->bi_iter.bi_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) mutex_lock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) r5l_get_meta(log, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) bio_list_add(&log->current_io->flush_barriers, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) log->current_io->has_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) log->current_io->has_null_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) atomic_inc(&log->current_io->pending_stripe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) r5l_submit_current_io(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* This will run after log space is reclaimed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static void r5l_run_no_space_stripes(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) spin_lock(&log->no_space_stripes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) while (!list_empty(&log->no_space_stripes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) sh = list_first_entry(&log->no_space_stripes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct stripe_head, log_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) list_del_init(&sh->log_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) set_bit(STRIPE_HANDLE, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) raid5_release_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) spin_unlock(&log->no_space_stripes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
/*
 * Calculate the new last_checkpoint.
 * For write-through mode, return log->next_checkpoint.
 * For write-back, return the log_start of the first sh on
 * stripe_in_journal_list.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static sector_t r5c_calculate_new_cp(struct r5conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct r5l_log *log = conf->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) sector_t new_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return log->next_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (list_empty(&conf->log->stripe_in_journal_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* all stripes flushed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return log->next_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) sh = list_first_entry(&conf->log->stripe_in_journal_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct stripe_head, r5c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) new_cp = sh->log_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return new_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
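/*
 * The distance from last_checkpoint to the new checkpoint is the log
 * space whose stripes have already reached the raid disks and can
 * therefore be reclaimed.
 */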
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static sector_t r5l_reclaimable_space(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct r5conf *conf = log->rdev->mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return r5l_ring_distance(log, log->last_checkpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) r5c_calculate_new_cp(conf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
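/*
 * An io_unit was just freed back to io_pool, so retry one stripe that
 * previously failed its io_unit allocation.
 */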
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static void r5l_run_no_mem_stripe(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) lockdep_assert_held(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (!list_empty(&log->no_mem_stripes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) sh = list_first_entry(&log->no_mem_stripes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct stripe_head, log_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) list_del_init(&sh->log_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) set_bit(STRIPE_HANDLE, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) raid5_release_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
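/*
 * Retire finished io_units in list order, recording each one's
 * log_start as the next checkpoint and freeing it back to io_pool.
 * Returns true if at least one io_unit was retired.
 */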
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static bool r5l_complete_finished_ios(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct r5l_io_unit *io, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) lockdep_assert_held(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* don't change list order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (io->state < IO_UNIT_STRIPE_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) log->next_checkpoint = io->log_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) list_del(&io->log_sibling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) mempool_free(io, &log->io_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) r5l_run_no_mem_stripe(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct r5l_log *log = io->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct r5conf *conf = log->rdev->mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) spin_lock_irqsave(&log->io_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!r5l_complete_finished_ios(log)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) spin_unlock_irqrestore(&log->io_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (r5l_reclaimable_space(log) > log->max_free_space ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) test_bit(R5C_LOG_TIGHT, &conf->cache_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) r5l_wake_reclaim(log, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) spin_unlock_irqrestore(&log->io_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) wake_up(&log->iounit_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) void r5l_stripe_write_finished(struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct r5l_io_unit *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) io = sh->log_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) sh->log_io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (io && atomic_dec_and_test(&io->pending_stripe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) __r5l_stripe_write_finished(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
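/*
 * The flush to the journal device has completed: the data of every
 * io_unit on flushing_ios is stable, so dispatch their stripe writes
 * to the raid disks and move the io_units to finished_ios.
 */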
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void r5l_log_flush_endio(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct r5l_log *log = container_of(bio, struct r5l_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) flush_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct r5l_io_unit *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (bio->bi_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) md_error(log->rdev->mddev, log->rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) spin_lock_irqsave(&log->io_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) list_for_each_entry(io, &log->flushing_ios, log_sibling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) r5l_io_run_stripes(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) spin_unlock_irqrestore(&log->io_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
/*
 * Starting dispatch of IO to the raid disks.
 *
 * The log consists of a sequence of io_units, each led by a meta block. There
 * is one situation we want to avoid: a broken meta block in the middle of the
 * log prevents recovery from finding the meta blocks at the head of the log.
 * If an operation requires the meta block at the head to be persistent in the
 * log, we must make sure the meta blocks before it are persistent too. One
 * such case is:
 *
 * stripe data/parity is in the log, and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is to strictly maintain the io_unit list order. In this case,
 * we only write the stripes of an io_unit to the raid disks once that io_unit
 * has become the first one whose data/parity is in the log.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) void r5l_flush_stripe_to_raid(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) bool do_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (!log || !log->need_cache_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) spin_lock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* flush bio is running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!list_empty(&log->flushing_ios)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) spin_unlock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) do_flush = !list_empty(&log->flushing_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) spin_unlock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (!do_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) bio_reset(&log->flush_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) bio_set_dev(&log->flush_bio, log->rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) log->flush_bio.bi_end_io = r5l_log_flush_endio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) submit_bio(&log->flush_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static void r5l_write_super(struct r5l_log *log, sector_t cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void r5l_write_super_and_discard_space(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) sector_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct block_device *bdev = log->rdev->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct mddev *mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) r5l_write_super(log, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (!blk_queue_discard(bdev_get_queue(bdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) mddev = log->rdev->mddev;
	/*
	 * Discard could zero data, so before discard we must make sure the
	 * superblock is updated to the new log tail. Updating the superblock
	 * (either by calling md_update_sb() directly or depending on the md
	 * thread) must hold the reconfig mutex. On the other hand,
	 * raid5_quiesce() is called with the reconfig mutex held. The first
	 * step of raid5_quiesce() is waiting for all IO to finish, hence
	 * waiting for the reclaim thread, while the reclaim thread is calling
	 * this function and waiting for the reconfig mutex. So there is a
	 * deadlock. We work around this issue with a trylock.
	 * FIXME: we could miss a discard if we can't take the reconfig mutex.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) set_mask_bits(&mddev->sb_flags, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!mddev_trylock(mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) md_update_sb(mddev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) mddev_unlock(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* discard IO error really doesn't matter, ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (log->last_checkpoint < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) blkdev_issue_discard(bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) log->last_checkpoint + log->rdev->data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) end - log->last_checkpoint, GFP_NOIO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) blkdev_issue_discard(bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) log->last_checkpoint + log->rdev->data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) log->device_size - log->last_checkpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) GFP_NOIO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) blkdev_issue_discard(bdev, log->rdev->data_offset, end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) GFP_NOIO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
/*
 * r5c_flush_stripe() moves a stripe from the cached list to the handle_list.
 * When called, the stripe must be on r5c_cached_full_stripes or
 * r5c_cached_partial_stripes.
 *
 * Must be called with conf->device_lock held.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) BUG_ON(list_empty(&sh->lru));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * The stripe is not ON_RELEASE_LIST, so it is safe to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * raid5_release_stripe() while holding conf->device_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) lockdep_assert_held(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) list_del_init(&sh->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) atomic_inc(&sh->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) set_bit(STRIPE_HANDLE, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) atomic_inc(&conf->active_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) r5c_make_stripe_write_out(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) atomic_inc(&conf->r5c_flushing_partial_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) atomic_inc(&conf->r5c_flushing_full_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) raid5_release_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If fewer than num full stripes are
 * flushed, flush some partial stripes until num stripes in total have been
 * flushed, or there are no more cached stripes.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) void r5c_flush_cache(struct r5conf *conf, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct stripe_head *sh, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) lockdep_assert_held(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (!conf->log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) r5c_flush_stripe(conf, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (count >= num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) list_for_each_entry_safe(sh, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) &conf->r5c_partial_stripe_list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) r5c_flush_stripe(conf, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (++count >= num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
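/*
 * Write-back reclaim: decide how aggressively to flush cached stripes
 * based on stripe cache pressure and, when log space is tight, also
 * flush stripes at the head of stripe_in_journal_list so the oldest
 * journal space can be reclaimed.
 */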
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static void r5c_do_reclaim(struct r5conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct r5l_log *log = conf->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) int total_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int stripes_to_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) int flushing_partial, flushing_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (!r5c_is_writeback(log))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) atomic_read(&conf->r5c_cached_full_stripes) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) flushing_full - flushing_partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (total_cached > conf->min_nr_stripes * 3 / 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) atomic_read(&conf->empty_inactive_list_nr) > 0)
		/*
		 * if stripe cache pressure is high, flush all full stripes
		 * and some partial stripes
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) R5C_FULL_STRIPE_FLUSH_BATCH(conf))
		/*
		 * if stripe cache pressure is moderate, or if there are many
		 * full stripes, flush all full stripes
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) stripes_to_flush = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* no need to flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) stripes_to_flush = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (stripes_to_flush >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) r5c_flush_cache(conf, stripes_to_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* if log space is tight, flush stripes on stripe_in_journal_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) spin_lock(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
			/*
			 * Stripes on stripe_in_journal_list could be in any
			 * state of the stripe_cache state machine. In this
			 * case, we only want to flush stripes that are on
			 * r5c_cached_full/partial_stripes. The following
			 * condition makes sure the stripe is on one of the
			 * two lists.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!list_empty(&sh->lru) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) !test_bit(STRIPE_HANDLE, &sh->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) atomic_read(&sh->count) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) r5c_flush_stripe(conf, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) spin_unlock(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) r5l_run_no_space_stripes(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) md_wakeup_thread(conf->mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
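/*
 * Wait until enough log space has become reclaimable (or no io_units
 * remain outstanding), then write the superblock with the new log tail
 * and discard the reclaimed region.
 */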
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static void r5l_do_reclaim(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct r5conf *conf = log->rdev->mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) sector_t reclaim_target = xchg(&log->reclaim_target, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) sector_t reclaimable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) sector_t next_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) bool write_super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) spin_lock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) write_super = r5l_reclaimable_space(log) > log->max_free_space ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) reclaim_target != 0 || !list_empty(&log->no_space_stripes);
	/*
	 * Move eligible io_units to the reclaim list without changing their
	 * order. Reclaimable and unreclaimable io_units can be mixed in the
	 * list; we must not reuse the space of an unreclaimable io_unit.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) reclaimable = r5l_reclaimable_space(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (reclaimable >= reclaim_target ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) (list_empty(&log->running_ios) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) list_empty(&log->io_end_ios) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) list_empty(&log->flushing_ios) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) list_empty(&log->finished_ios)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) md_wakeup_thread(log->rdev->mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) wait_event_lock_irq(log->iounit_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) r5l_reclaimable_space(log) > reclaimable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) next_checkpoint = r5c_calculate_new_cp(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) spin_unlock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (reclaimable == 0 || !write_super)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * the super here, because the log area might be reused soon and we
	 * don't want to confuse recovery.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) r5l_write_super_and_discard_space(log, next_checkpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) mutex_lock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) log->last_checkpoint = next_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) r5c_update_log_state(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) r5l_run_no_space_stripes(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static void r5l_reclaim_thread(struct md_thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct mddev *mddev = thread->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct r5l_log *log = conf->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) r5c_do_reclaim(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) r5l_do_reclaim(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
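^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * ask the reclaim thread to free at least @space sectors. Concurrent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * callers race through the cmpxchg loop below, which only ever raises
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * log->reclaim_target: a smaller request never overwrites a larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * pending one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) */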
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) unsigned long target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) unsigned long new = (unsigned long)space; /* may overflow on 32-bit, in theory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) target = log->reclaim_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (new < target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) } while (cmpxchg(&log->reclaim_target, target, new) != target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) md_wakeup_thread(log->reclaim_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
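^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * on quiesce, park the reclaim thread so it cannot run concurrently,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * then do one final reclaim pass here with the maximal target (MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) */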
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) void r5l_quiesce(struct r5l_log *log, int quiesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct mddev *mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (quiesce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /* make sure r5l_write_super_and_discard_space exits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) wake_up(&mddev->sb_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) kthread_park(log->reclaim_thread->tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) r5l_wake_reclaim(log, MaxSector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) r5l_do_reclaim(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) kthread_unpark(log->reclaim_thread->tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) bool r5l_log_disk_error(struct r5conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct r5l_log *log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * don't allow writes if the journal disk is missing: a NULL log while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * MD_HAS_JOURNAL is set means the journal device has disappeared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) log = rcu_dereference(conf->log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ret = test_bit(Faulty, &log->rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
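^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) /* read ahead pool size: 256 x 4k pages, i.e. 1MB of journal data per fetch */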
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) #define R5L_RECOVERY_PAGE_POOL_SIZE 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct r5l_recovery_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct page *meta_page; /* current meta */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) sector_t meta_total_blocks; /* total size of current meta and data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) sector_t pos; /* recovery position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) u64 seq; /* recovery position seq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) int data_parity_stripes; /* number of data_parity stripes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) int data_only_stripes; /* number of data_only stripes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) struct list_head cached_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * read ahead page pool (ra_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * in recovery, the log is read sequentially. It is not efficient to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * read every page with sync_page_io(). The read ahead page pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * reads multiple pages with one IO, so further log reads can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * just copy data from the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) sector_t pool_offset; /* offset of first page in the pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) int total_pages; /* total allocated pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) int valid_pages; /* pages with valid data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) struct bio *ra_bio; /* bio to do the read ahead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (!ctx->ra_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) ctx->valid_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) ctx->total_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) ctx->ra_pool[ctx->total_pages] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) ctx->total_pages += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
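^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /* a partially filled pool is fine; only a completely empty pool is fatal */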
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (ctx->total_pages == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) bio_put(ctx->ra_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) ctx->pool_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) static void r5l_recovery_free_ra_pool(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) for (i = 0; i < ctx->total_pages; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) put_page(ctx->ra_pool[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) bio_put(ctx->ra_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * fetch up to ctx->total_pages pages starting at offset; the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * actually read is recorded in ctx->valid_pages. In normal cases it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * equals ctx->total_pages, but if offset is close to the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * journal device, ctx->valid_pages can be smaller than ctx->total_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct r5l_recovery_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) sector_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) bio_reset(ctx->ra_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) bio_set_dev(ctx->ra_bio, log->rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) ctx->valid_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) ctx->pool_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) while (ctx->valid_pages < ctx->total_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) bio_add_page(ctx->ra_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) ctx->valid_pages += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (offset == 0) /* reached end of the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return submit_bio_wait(ctx->ra_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * try to read a page from the read ahead page pool; if the page is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * in the pool, call r5l_recovery_fetch_ra_pool() to refill the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) static int r5l_recovery_read_page(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct r5l_recovery_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) sector_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (offset < ctx->pool_offset ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) BUG_ON(offset < ctx->pool_offset ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
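^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * each pool page holds one 4k block (BLOCK_SECTORS sectors), so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * page index is (offset - pool_offset) >> BLOCK_SECTOR_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) */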
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) memcpy(page_address(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) BLOCK_SECTOR_SHIFT]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) static int r5l_recovery_read_meta_block(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct page *page = ctx->meta_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct r5l_meta_block *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) u32 crc, stored_crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) mb = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) stored_crc = le32_to_cpu(mb->checksum);
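^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) /* the crc was computed with the checksum field zeroed, so clear it before recomputing */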
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) mb->checksum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) le64_to_cpu(mb->seq) != ctx->seq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) mb->version != R5LOG_VERSION ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) le64_to_cpu(mb->position) != ctx->pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (stored_crc != crc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ctx->meta_total_blocks = BLOCK_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) r5l_recovery_create_empty_meta_block(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) sector_t pos, u64 seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct r5l_meta_block *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) mb = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) clear_page(mb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) mb->magic = cpu_to_le32(R5LOG_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) mb->version = R5LOG_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) mb->seq = cpu_to_le64(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) mb->position = cpu_to_le64(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) u64 seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) struct r5l_meta_block *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) r5l_recovery_create_empty_meta_block(log, page, pos, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) mb = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) mb, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) REQ_SYNC | REQ_FUA, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * r5l_recovery_load_data() and r5l_recovery_load_parity() use the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * R5_Wantwrite to mark valid (potentially not flushed) data in the journal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * We have already verified the checksums in r5l_recovery_verify_data_checksum_for_mb(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * so there should not be any mismatch here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) static void r5l_recovery_load_data(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) struct stripe_head *sh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct r5l_recovery_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct r5l_payload_data_parity *payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) sector_t log_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) int dd_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) raid5_compute_sector(conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) le64_to_cpu(payload->location), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) &dd_idx, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) sh->dev[dd_idx].log_checksum =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) le32_to_cpu(payload->checksum[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) ctx->meta_total_blocks += BLOCK_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) set_bit(STRIPE_R5C_CACHING, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static void r5l_recovery_load_parity(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct stripe_head *sh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct r5l_recovery_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct r5l_payload_data_parity *payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) sector_t log_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
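^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * parity occupies max_degraded blocks in the journal: P only for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * RAID4/5, P and Q for RAID6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) */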
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) sh->dev[sh->pd_idx].log_checksum =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) le32_to_cpu(payload->checksum[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (sh->qd_idx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) r5l_recovery_read_page(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) log, ctx, sh->dev[sh->qd_idx].page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) r5l_ring_add(log, log_offset, BLOCK_SECTORS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) sh->dev[sh->qd_idx].log_checksum =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) le32_to_cpu(payload->checksum[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) clear_bit(STRIPE_R5C_CACHING, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static void r5l_recovery_reset_stripe(struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) sh->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) sh->log_start = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) for (i = sh->disks; i--; )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) sh->dev[i].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) r5l_recovery_replay_one_stripe(struct r5conf *conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) struct stripe_head *sh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) struct md_rdev *rdev, *rrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) int disk_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) int data_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) for (disk_index = 0; disk_index < sh->disks; disk_index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) data_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * stripes that only have parity must have been flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * before the crash that we are now recovering from, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * there is nothing more to recover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (data_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) for (disk_index = 0; disk_index < sh->disks; disk_index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * in case the device is broken: take a nr_pending reference so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * rdev cannot be freed while we drop the RCU lock around the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * synchronous write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) rdev = rcu_dereference(conf->disks[disk_index].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) sync_page_io(rdev, sh->sector, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) sh->dev[disk_index].page, REQ_OP_WRITE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) rdev_dec_pending(rdev, rdev->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) rrdev = rcu_dereference(conf->disks[disk_index].replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (rrdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) atomic_inc(&rrdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) sync_page_io(rrdev, sh->sector, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) sh->dev[disk_index].page, REQ_OP_WRITE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) rdev_dec_pending(rrdev, rrdev->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ctx->data_parity_stripes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) r5l_recovery_reset_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static struct stripe_head *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) r5c_recovery_alloc_stripe(struct r5conf *conf, sector_t stripe_sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) int noblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (!sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) return NULL; /* no more stripes available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) r5l_recovery_reset_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) static struct stripe_head *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) list_for_each_entry(sh, list, lru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (sh->sector == sect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) struct stripe_head *sh, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) r5l_recovery_reset_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) list_del_init(&sh->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) raid5_release_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) struct stripe_head *sh, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) list_del_init(&sh->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) raid5_release_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) /* if the checksum matches, return 0; otherwise return -EINVAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) r5l_recovery_verify_data_checksum(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct r5l_recovery_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) sector_t log_offset, __le32 log_checksum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) u32 checksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) r5l_recovery_read_page(log, ctx, page, log_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) addr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) kunmap_atomic(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * before loading data into the stripe cache, we need to verify checksums
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * for all data; if any data page mismatches, we drop all data in the meta block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) struct r5l_meta_block *mb = page_address(ctx->meta_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) sector_t mb_offset = sizeof(struct r5l_meta_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) struct r5l_payload_data_parity *payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) struct r5l_payload_flush *payload_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) while (mb_offset < le32_to_cpu(mb->meta_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) payload = (void *)mb + mb_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) payload_flush = (void *)mb + mb_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (r5l_recovery_verify_data_checksum(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) log, ctx, page, log_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) payload->checksum[0]) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) goto mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (r5l_recovery_verify_data_checksum(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) log, ctx, page, log_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) payload->checksum[0]) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) goto mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (conf->max_degraded == 2 && /* q for RAID 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) r5l_recovery_verify_data_checksum(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) log, ctx, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) r5l_ring_add(log, log_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) BLOCK_SECTORS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) payload->checksum[1]) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) goto mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) goto mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) mb_offset += sizeof(struct r5l_payload_flush) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) le32_to_cpu(payload_flush->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /* DATA or PARITY payload */
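^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * payload->size is in sectors; size >> (PAGE_SHIFT - 9) is the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * count, and the header is followed by one __le32 checksum per page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */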
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) log_offset = r5l_ring_add(log, log_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) le32_to_cpu(payload->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) mb_offset += sizeof(struct r5l_payload_data_parity) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) sizeof(__le32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) mismatch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * Analyze all data/parity pages in one meta block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * 0 for success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * -EINVAL for unknown payload type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * -EAGAIN for checksum mismatch of a data page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * -ENOMEM when out of memory (alloc_page() failed or we ran out of stripes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) r5c_recovery_analyze_meta_block(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct r5l_recovery_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) struct list_head *cached_stripe_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct r5l_meta_block *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct r5l_payload_data_parity *payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) struct r5l_payload_flush *payload_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) int mb_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) sector_t log_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) sector_t stripe_sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) * on a checksum mismatch in the data blocks we drop all data in this mb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) * but we still read the next mb for other data with the FLUSH flag, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * io_units could finish out of order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (ret == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) else if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) return ret; /* -ENOMEM due to alloc_page() failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) mb = page_address(ctx->meta_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) mb_offset = sizeof(struct r5l_meta_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) while (mb_offset < le32_to_cpu(mb->meta_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) int dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) payload = (void *)mb + mb_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) payload_flush = (void *)mb + mb_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) int i, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) for (i = 0; i < count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) sh = r5c_recovery_lookup_stripe(cached_stripe_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) stripe_sect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (sh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) r5l_recovery_reset_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) list_del_init(&sh->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) raid5_release_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) mb_offset += sizeof(struct r5l_payload_flush) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) le32_to_cpu(payload_flush->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) /* DATA or PARITY payload */
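^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * a DATA payload records the logical array sector and must be mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * to the stripe sector; a PARITY payload already records the stripe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * sector itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) */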
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) raid5_compute_sector(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) conf, le64_to_cpu(payload->location), 0, &dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) : le64_to_cpu(payload->location);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) sh = r5c_recovery_lookup_stripe(cached_stripe_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) stripe_sect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (!sh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * cannot get a stripe from raid5_get_active_stripe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * try replaying some cached stripes to release them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) if (!sh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) r5c_recovery_replay_stripes(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) cached_stripe_list, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) sh = r5c_recovery_alloc_stripe(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) conf, stripe_sect, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (!sh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) int new_size = conf->min_nr_stripes * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) mdname(mddev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) ret = raid5_set_cache_size(mddev, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) if (conf->min_nr_stripes <= new_size / 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) mdname(mddev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) new_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) conf->min_nr_stripes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) conf->max_nr_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) sh = r5c_recovery_alloc_stripe(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) conf, stripe_sect, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (!sh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) list_add_tail(&sh->lru, cached_stripe_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
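^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * a data payload arriving after parity means the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * data+parity set for this stripe is complete; replay (write out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * that set now and start the stripe over for the new data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) */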
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) r5l_recovery_replay_one_stripe(conf, sh, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) list_move_tail(&sh->lru, cached_stripe_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) r5l_recovery_load_data(log, sh, ctx, payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) log_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) r5l_recovery_load_parity(log, sh, ctx, payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) log_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) log_offset = r5l_ring_add(log, log_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) le32_to_cpu(payload->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) mb_offset += sizeof(struct r5l_payload_data_parity) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) sizeof(__le32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) * Load the stripe into cache. The stripe will be written out later by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) * the stripe cache state machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) static void r5c_recovery_load_one_stripe(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) struct r5dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) for (i = sh->disks; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) dev = sh->dev + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) set_bit(R5_InJournal, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) set_bit(R5_UPTODATE, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * Scan through the log for all to-be-flushed data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * For stripes with data and parity, namely Data-Parity stripe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * For stripes with only data, namely Data-Only stripe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * For a stripe, if we see data after parity, we should discard all previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * data and parity for this stripe, as that data has already been flushed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * At the end of the scan, we return the new journal_tail, which points to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * first data-only stripe on the journal device, or to the next invalid meta block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) static int r5c_recovery_flush_log(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) /* scan through the log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (r5l_recovery_read_meta_block(log, ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) ret = r5c_recovery_analyze_meta_block(log, ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) &ctx->cached_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * -EAGAIN means a mismatch in a data block; in this case, we still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * try to scan the next meta block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (ret && ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) break; /* ret == -EINVAL or -ENOMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) ctx->seq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (ret == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /* replay data-parity stripes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) /* load data-only stripes to stripe cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) list_for_each_entry(sh, &ctx->cached_list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) r5c_recovery_load_one_stripe(log, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) ctx->data_only_stripes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) * We did a recovery. Now ctx.pos points to an invalid meta block. The new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) * log will start here, but we can't let the superblock point to the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) * valid meta block. The log might look like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) * | meta 1| meta 2| meta 3|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * meta 1 is valid, meta 2 is invalid and meta 3 could be valid. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) * superblock points to meta 1 and we write a new valid meta 2n, then if a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) * crash happens again, the new recovery will start from meta 1. Since meta
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * 2n is valid now, recovery will think meta 3 is valid too, which is wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * The solution is to create a new meta in meta 2's slot with its seq ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * meta 1's seq + 10000 and let the superblock point to it. The same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) * recovery will not treat meta 3 as valid, because its seq doesn't match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) */
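^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * A worked example with made-up numbers: meta 1 has seq 100 at block 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * block 11 is invalid, and a stale meta 3 with seq 102 sits at block 12.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * We write the new meta at block 11 with seq 10100 and point the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * superblock at it. A later recovery starting there expects seq 10101 at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * block 12, so the stale meta 3 (seq 102) is rejected. Had the new meta
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * used seq 101 instead, the stale meta 3 would have been accepted as valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) */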
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * Before recovery, the log looks like the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) * ---------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) * | valid log | invalid log |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * ---------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * |- log->last_checkpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * |- log->last_cp_seq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) * Now we scan through the log until we see an invalid entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) * ---------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) * | valid log | invalid log |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) * ---------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) * ^ ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) * |- log->last_checkpoint |- ctx->pos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * |- log->last_cp_seq |- ctx->seq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) * From this point, we need to increase the seq number by 10000 to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) * confusing the next recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) * ---------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) * | valid log | invalid log |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) * ---------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * ^ ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * |- log->last_checkpoint |- ctx->pos+1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) * |- log->last_cp_seq |- ctx->seq+10001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) * However, it is not safe to start the state machine yet, because data only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) * parities are not yet secured in RAID. To save these data only parities, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) * rewrite them from seq+11.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) * -----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * | valid log | data only stripes | invalid log |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * -----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * ^ ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * |- log->last_checkpoint |- ctx->pos+n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * |- log->last_cp_seq |- ctx->seq+10000+n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) * If failure happens again during this process, the recovery can safe start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) * again from log->last_checkpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) * Once data only stripes are rewritten to journal, we move log_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * -----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * | old log | data only stripes | invalid log |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * -----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) * ^ ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * |- log->last_checkpoint |- ctx->pos+n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) * |- log->last_cp_seq |- ctx->seq+10000+n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * Then we can safely start the state machine. If failure happens from this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * point on, the recovery will start from new log->last_checkpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) struct stripe_head *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) sector_t next_checkpoint = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) WARN_ON(list_empty(&ctx->cached_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) list_for_each_entry(sh, &ctx->cached_list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) struct r5l_meta_block *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) sector_t write_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) r5l_recovery_create_empty_meta_block(log, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) ctx->pos, ctx->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) mb = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) offset = le32_to_cpu(mb->meta_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
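		/*
		 * For each member with a cached copy in the journal, append
		 * a data payload to the meta block and write the data page
		 * itself to the journal blocks that follow it.
		 */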
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) for (i = sh->disks; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) struct r5dev *dev = &sh->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) struct r5l_payload_data_parity *payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (test_bit(R5_InJournal, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) payload = (void *)mb + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) payload->header.type = cpu_to_le16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) R5LOG_PAYLOAD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) payload->size = cpu_to_le32(BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) payload->location = cpu_to_le64(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) raid5_compute_blocknr(sh, i, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) addr = kmap_atomic(dev->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) payload->checksum[0] = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) crc32c_le(log->uuid_checksum, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) kunmap_atomic(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) sync_page_io(log->rdev, write_pos, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) dev->page, REQ_OP_WRITE, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) write_pos = r5l_ring_add(log, write_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) offset += sizeof(__le32) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) sizeof(struct r5l_payload_data_parity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) mb->meta_size = cpu_to_le32(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) mb, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) sh->log_start = ctx->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) atomic_inc(&log->stripe_in_journal_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) ctx->pos = write_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) ctx->seq += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) next_checkpoint = sh->log_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) log->next_checkpoint = next_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
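/*
 * Flush the data-only stripes rewritten above through the RAID array.
 * Temporarily switch to write-back mode so the cached stripes can go
 * through the normal write-out path, wait until no stripe is active, then
 * restore write-through mode. MD_SB_CHANGE_PENDING is cleared for the
 * duration so stripe handling is not blocked on a superblock update
 * during recovery.
 */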
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct r5l_recovery_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) struct stripe_head *sh, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) bool cleared_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (ctx->data_only_stripes == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) cleared_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) r5c_make_stripe_write_out(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) set_bit(STRIPE_HANDLE, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) list_del_init(&sh->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) raid5_release_stripe(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) /* reuse conf->wait_for_quiescent in recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) wait_event(conf->wait_for_quiescent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) atomic_read(&conf->active_stripes) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (cleared_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
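/*
 * Recovery entry point: replay the log from the last checkpoint, bump the
 * sequence number by 10000 so stale meta blocks can't be mistaken for new
 * ones, rewrite the data-only stripes (or write an empty meta block after
 * a clean shutdown), update the superblock, and finally flush the cached
 * stripes through the array.
 */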
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) static int r5l_recovery_log(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) struct r5l_recovery_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) sector_t pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) ctx->pos = log->last_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) ctx->seq = log->last_cp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) INIT_LIST_HEAD(&ctx->cached_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) ctx->meta_page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (!ctx->meta_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) goto meta_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) goto ra_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) ret = r5c_recovery_flush_log(log, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) pos = ctx->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) ctx->seq += 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) pr_info("md/raid:%s: starting from clean shutdown\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) mdname(mddev), ctx->data_only_stripes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) ctx->data_parity_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (ctx->data_only_stripes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) log->next_checkpoint = ctx->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) log->log_start = ctx->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) log->seq = ctx->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) log->last_checkpoint = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) r5l_write_super(log, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) r5c_recovery_flush_data_only_stripes(log, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) r5l_recovery_free_ra_pool(log, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) ra_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) __free_page(ctx->meta_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) meta_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
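/*
 * Record @cp as the journal tail in the member rdev and mark the
 * superblock for update; md writes the superblock out later.
 */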
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static void r5l_write_super(struct r5l_log *log, sector_t cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) log->rdev->journal_tail = cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) struct r5conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) spin_lock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (!conf || !conf->log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) spin_unlock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) switch (conf->log->r5c_journal_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) case R5C_JOURNAL_MODE_WRITE_THROUGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) ret = snprintf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) page, PAGE_SIZE, "[%s] %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) case R5C_JOURNAL_MODE_WRITE_BACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) ret = snprintf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) page, PAGE_SIZE, "%s [%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) spin_unlock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
/*
 * Set journal cache mode on @mddev (external API initially needed by dm-raid).
 *
 * @mode as defined in 'enum r5c_journal_mode'.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) int r5c_journal_mode_set(struct mddev *mddev, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) struct r5conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) mode > R5C_JOURNAL_MODE_WRITE_BACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (!conf || !conf->log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (raid5_calc_degraded(conf) > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) mode == R5C_JOURNAL_MODE_WRITE_BACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) mddev_suspend(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) conf->log->r5c_journal_mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) mddev_resume(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) mdname(mddev), mode, r5c_journal_mode_str[mode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) EXPORT_SYMBOL(r5c_journal_mode_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
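/* sysfs store: parse a journal mode name and apply it under the mddev lock */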
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) static ssize_t r5c_journal_mode_store(struct mddev *mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) const char *page, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) int mode = ARRAY_SIZE(r5c_journal_mode_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) size_t len = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) if (len < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (page[len - 1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) while (mode--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (strlen(r5c_journal_mode_str[mode]) == len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) !strncmp(page, r5c_journal_mode_str[mode], len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) ret = mddev_lock(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) ret = r5c_journal_mode_set(mddev, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) mddev_unlock(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return ret ?: length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) struct md_sysfs_entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) r5c_journal_mode = __ATTR(journal_mode, 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) r5c_journal_mode_show, r5c_journal_mode_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
/*
 * Try to handle a write operation in the caching phase. This function
 * should only be called in write-back mode.
 *
 * If all outstanding writes can be handled in the caching phase, returns 0.
 * If some writes require the write-out phase, calls
 * r5c_make_stripe_write_out() and returns -EAGAIN.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) int r5c_try_caching_write(struct r5conf *conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct stripe_head *sh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) struct stripe_head_state *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) int disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct r5l_log *log = conf->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct r5dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) int to_cache = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) void **pslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) sector_t tree_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) uintptr_t refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) BUG_ON(!r5c_is_writeback(log));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) * There are two different scenarios here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * 1. The stripe has some data cached, and it is sent to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * write-out phase for reclaim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * 2. The stripe is clean, and this is the first write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) * For 1, return -EAGAIN, so we continue with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * handle_stripe_dirtying().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) * For 2, set STRIPE_R5C_CACHING and continue with caching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) * write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
		/* case 1: anything in the journal or anything written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) if (s->injournal > 0 || s->written > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) /* case 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) set_bit(STRIPE_R5C_CACHING, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
	/*
	 * When run in degraded mode, the array is set to write-through mode.
	 * This check helps drain pending writes safely in the transition to
	 * write-through mode.
	 *
	 * When a stripe is syncing, the write is also handled in write-through
	 * mode.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) r5c_make_stripe_write_out(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) for (i = disks; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) dev = &sh->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) /* if non-overwrite, use writing-out phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) !test_bit(R5_InJournal, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) r5c_make_stripe_write_out(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) /* if the stripe is not counted in big_stripe_tree, add it now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) tree_index = r5c_tree_index(conf, sh->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) spin_lock(&log->tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) tree_index);
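		/*
		 * A slot in big_stripe_tree holds a reference count shifted
		 * left by R5C_RADIX_COUNT_SHIFT, not a real pointer.
		 */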
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) if (pslot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) refcount = (uintptr_t)radix_tree_deref_slot_protected(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) pslot, &log->tree_lock) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) R5C_RADIX_COUNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) radix_tree_replace_slot(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) &log->big_stripe_tree, pslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) * this radix_tree_insert can fail safely, so no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * need to call radix_tree_preload()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) ret = radix_tree_insert(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) &log->big_stripe_tree, tree_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) (void *)(1 << R5C_RADIX_COUNT_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) spin_unlock(&log->tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) r5c_make_stripe_write_out(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) spin_unlock(&log->tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) * counted in the radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) atomic_inc(&conf->r5c_cached_partial_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) for (i = disks; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) dev = &sh->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (dev->towrite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) set_bit(R5_Wantwrite, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) set_bit(R5_Wantdrain, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) set_bit(R5_LOCKED, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) to_cache++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) if (to_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) * r5c_handle_data_cached()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) set_bit(STRIPE_LOG_TRAPPED, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
/*
 * Free the extra pages (orig_page) we allocated for prexor. If they came
 * from the shared disk_info extra_page, don't free them; instead clear
 * R5C_EXTRA_PAGE_IN_USE and wake the array thread so another stripe can
 * use the pages.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) void r5c_release_extra_page(struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct r5conf *conf = sh->raid_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) bool using_disk_info_extra_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) using_disk_info_extra_page =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) sh->dev[0].orig_page == conf->disks[0].extra_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) for (i = sh->disks; i--; )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) if (sh->dev[i].page != sh->dev[i].orig_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) struct page *p = sh->dev[i].orig_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) sh->dev[i].orig_page = sh->dev[i].page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) if (!using_disk_info_extra_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) put_page(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) if (using_disk_info_extra_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) md_wakeup_thread(conf->mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
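/* point orig_page of every member device at the shared per-disk extra page */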
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) void r5c_use_extra_page(struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) struct r5conf *conf = sh->raid_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) struct r5dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) for (i = sh->disks; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) dev = &sh->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (dev->orig_page != dev->page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) put_page(dev->orig_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) dev->orig_page = conf->disks[i].extra_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) * stripe is committed to RAID disks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) void r5c_finish_stripe_write_out(struct r5conf *conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) struct stripe_head *sh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) struct stripe_head_state *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) struct r5l_log *log = conf->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) int do_wakeup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) sector_t tree_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) void **pslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) uintptr_t refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) for (i = sh->disks; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) clear_bit(R5_InJournal, &sh->dev[i].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) do_wakeup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
	/*
	 * analyse_stripe() runs before r5c_finish_stripe_write_out(), so
	 * s->injournal still reflects the R5_InJournal bits we just cleared.
	 * Update s->injournal to match.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) s->injournal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) if (atomic_dec_and_test(&conf->pending_full_writes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) md_wakeup_thread(conf->mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) if (do_wakeup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) wake_up(&conf->wait_for_overlap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) spin_lock_irq(&log->stripe_in_journal_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) list_del_init(&sh->r5c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) spin_unlock_irq(&log->stripe_in_journal_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) sh->log_start = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) atomic_dec(&log->stripe_in_journal_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) r5c_update_log_state(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) /* stop counting this stripe in big_stripe_tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) tree_index = r5c_tree_index(conf, sh->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) spin_lock(&log->tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) tree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) BUG_ON(pslot == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) refcount = (uintptr_t)radix_tree_deref_slot_protected(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) pslot, &log->tree_lock) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) R5C_RADIX_COUNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) if (refcount == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) radix_tree_delete(&log->big_stripe_tree, tree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) radix_tree_replace_slot(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) &log->big_stripe_tree, pslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) spin_unlock(&log->tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) atomic_dec(&conf->r5c_flushing_partial_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) atomic_dec(&conf->r5c_cached_partial_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) atomic_dec(&conf->r5c_flushing_full_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) atomic_dec(&conf->r5c_cached_full_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) r5l_append_flush_payload(log, sh->sector);
	/* stripe is flushed to raid disks, we can do resync now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) set_bit(STRIPE_HANDLE, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
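/*
 * Write the dirty data of @sh to the journal (caching phase): checksum
 * every page marked R5_Wantwrite, reserve log space, and either log the
 * stripe or queue it on the no-space/no-mem lists.
 */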
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) struct r5conf *conf = sh->raid_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) int pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) int reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) BUG_ON(!log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) for (i = 0; i < sh->disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) addr = kmap_atomic(sh->dev[i].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) addr, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) kunmap_atomic(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) WARN_ON(pages == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) * The stripe must enter state machine again to call endio, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) * don't delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) clear_bit(STRIPE_DELAYED, &sh->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) atomic_inc(&sh->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) mutex_lock(&log->io_mutex);
	/* space to reserve, in sectors: one meta block plus the data pages */
	reserve = (1 + pages) << (PAGE_SHIFT - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) sh->log_start == MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) r5l_add_no_space_stripe(log, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) else if (!r5l_has_free_space(log, reserve)) {
		BUG_ON(sh->log_start == log->last_checkpoint);
		r5l_add_no_space_stripe(log, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) ret = r5l_log_stripe(log, sh, pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) spin_lock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) list_add_tail(&sh->log_list, &log->no_mem_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) spin_unlock_irq(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
/* check whether this big stripe is in the write-back cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) struct r5l_log *log = conf->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) sector_t tree_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) void *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) WARN_ON_ONCE(!rcu_read_lock_held());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) tree_index = r5c_tree_index(conf, sect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) return slot != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
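/*
 * Read the meta block that the superblock's journal_tail points at and
 * validate it. If it is not a valid meta block, start a fresh log;
 * otherwise run log recovery from it.
 */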
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) static int r5l_load_log(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) struct md_rdev *rdev = log->rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) struct r5l_meta_block *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) sector_t cp = log->rdev->journal_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) u32 stored_crc, expected_crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) bool create_super = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) /* Make sure it's valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) cp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) goto ioerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) mb = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) mb->version != R5LOG_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) create_super = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) goto create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) stored_crc = le32_to_cpu(mb->checksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) mb->checksum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) if (stored_crc != expected_crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) create_super = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) goto create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) if (le64_to_cpu(mb->position) != cp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) create_super = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) goto create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) create:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (create_super) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) log->last_cp_seq = prandom_u32();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) cp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
		/*
		 * Make sure the superblock points to the correct address.
		 * The log might receive data very soon. If the superblock
		 * doesn't have the correct log tail address, recovery can't
		 * find the log.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) r5l_write_super(log, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) log->last_cp_seq = le64_to_cpu(mb->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) log->max_free_space = RECLAIM_MAX_FREE_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) log->last_checkpoint = cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) if (create_super) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) log->seq = log->last_cp_seq + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) log->next_checkpoint = cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) ret = r5l_recovery_log(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) r5c_update_log_state(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) ioerr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
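/* load the log and run recovery; on failure tear the log down again */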
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) int r5l_start(struct r5l_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) ret = r5l_load_log(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) struct mddev *mddev = log->rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) r5l_exit_log(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
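/*
 * Called when a member device fails. If the array became degraded, or the
 * journal device itself failed, while in write-back mode, schedule an
 * asynchronous switch to write-through.
 */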
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) struct r5l_log *log = conf->log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if ((raid5_calc_degraded(conf) > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) test_bit(Journal, &rdev->flags)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) schedule_work(&log->disable_writeback_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) struct request_queue *q = bdev_get_queue(rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct r5l_log *log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) pr_debug("md/raid:%s: using device %s as journal\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) mdname(conf->mddev), bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (PAGE_SIZE != 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
	/*
	 * PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
	 * raid_disks r5l_payload_data_parity entries.
	 *
	 * The write journal and cache do not work for very big arrays
	 * (raid_disks > 203).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) if (sizeof(struct r5l_meta_block) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) conf->raid_disks) > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) mdname(conf->mddev), conf->raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) log = kzalloc(sizeof(*log), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) if (!log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) log->rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) sizeof(rdev->mddev->uuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) mutex_init(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) spin_lock_init(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) INIT_LIST_HEAD(&log->running_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) INIT_LIST_HEAD(&log->io_end_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) INIT_LIST_HEAD(&log->flushing_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) INIT_LIST_HEAD(&log->finished_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) bio_init(&log->flush_bio, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
	if (ret)
		goto io_pool;

	ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto io_bs;

	ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
	if (ret)
		goto out_mempool;

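	/*
	 * big_stripe_tree tracks which chunks currently have stripes
	 * in the write-back cache (keyed via r5c_tree_index()), so the
	 * read path can cheaply detect cached data. GFP_NOWAIT since
	 * insertions can happen in atomic context.
	 */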
	spin_lock_init(&log->tree_lock);
	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);

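	/*
	 * The reclaim thread frees log space by writing back or
	 * flushing stripes the journal still covers; the timeout set
	 * below also wakes it periodically so space keeps being
	 * reclaimed even without an explicit kick.
	 */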
	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;

	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);

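	/*
	 * Start in the conservative write-through mode; write-back
	 * caching has to be enabled explicitly afterwards (via the
	 * r5c_journal_mode sysfs attribute).
	 */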
	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	INIT_LIST_HEAD(&log->stripe_in_journal_list);
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

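	/*
	 * Publish the fully initialised log. Lockless readers access
	 * conf->log under rcu_read_lock(), pairing with the
	 * synchronize_rcu() in r5l_exit_log() below.
	 */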
	rcu_assign_pointer(conf->log, log);

	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

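	/*
	 * Error unwind, in reverse order of setup. Note that every
	 * failure, including the -ENOMEM cases captured in ret, is
	 * reported to the caller as -EINVAL.
	 */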
reclaim_thread:
	mempool_exit(&log->meta_pool);
out_mempool:
	bioset_exit(&log->bs);
io_bs:
	mempool_exit(&log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

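/*
 * Tear down in reverse order of r5l_init_log(): unpublish the log
 * pointer and wait out RCU readers first, then stop the workers and
 * the reclaim thread that could still reference the log, and finally
 * release the pools.
 */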
void r5l_exit_log(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	conf->log = NULL;
	synchronize_rcu();

	/* Ensure disable_writeback_work wakes up and exits */
	wake_up(&conf->mddev->sb_wait);
	flush_work(&log->disable_writeback_work);
	md_unregister_thread(&log->reclaim_thread);
	mempool_exit(&log->meta_pool);
	bioset_exit(&log->bs);
	mempool_exit(&log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}