// SPDX-License-Identifier: GPL-2.0-only
/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
#include "raid5-log.h"

/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 *          0 | --- | --- | --- | +----+
 *          8 | -W- | -W- | --- | | pp |   data_sector = 8
 *         16 | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 *         24 | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0); they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity; the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write; header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit. When the PPL write completes, the stripes
 * associated with the io_unit are released and raid5d starts writing their data
 * and parity. When all stripes are written, the io_unit is freed and the next
 * one can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another io_unit
 * can't be submitted until the previous has completed (PPL and stripe
 * data+parity is written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If the write-back cache is enabled for any of the disks in the array, its
 * data must be flushed before the next io_unit is submitted.
 */
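
/*
 * Illustrative sketch, not used by the driver: how the entry fields in the
 * example above are derived. The helper name and the hard-coded geometry
 * (3 stripe_heads, 2 modified data disks, 4 KiB pages) are hypothetical and
 * only mirror the diagram in the comment.
 */
static void __maybe_unused ppl_doc_example_entry(struct ppl_header_entry *e)
{
	unsigned int stripes = 3;	/* stripe_heads at sectors 8, 16, 24 */
	unsigned int data_disks = 2;	/* dd0 and dd1 are written */

	e->data_sector = cpu_to_le64(8);	/* first modified raid sector */
	e->data_size = cpu_to_le32(stripes * data_disks * PAGE_SIZE); /* 3 * 2 * 4k */
	e->pp_size = cpu_to_le32(stripes * PAGE_SIZE);		      /* 3 * 4k */
}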

#define PPL_SPACE_SIZE (128 * 1024)

struct ppl_conf {
	struct mddev *mddev;

	/* array of child logs, one for each raid disk */
	struct ppl_log *child_logs;
	int count;

	int block_size;		/* the logical block size used for data_sector
				 * in ppl_header_entry */
	u32 signature;		/* raid array identifier */
	atomic64_t seq;		/* current log write sequence number */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	struct bio_set flush_bs;

	/* used only for recovery */
	int recovered_entries;
	int mismatch_count;

	/* stripes to retry if failed to allocate io_unit */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;

	unsigned short write_hint;
};

struct ppl_log {
	struct ppl_conf *ppl_conf;	/* shared between all log instances */

	struct md_rdev *rdev;		/* array member disk associated with
					 * this log instance */
	struct mutex io_mutex;
	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
					 * always at the end of io_list */
	spinlock_t io_list_lock;
	struct list_head io_list;	/* all io_units of this log */

	sector_t next_io_sector;
	unsigned int entry_space;
	bool use_multippl;
	bool wb_cache_on;
	unsigned long disk_flush_bitmap;
};

#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
	struct ppl_log *log;

	struct page *header_page;	/* for ppl_header */

	unsigned int entries_count;	/* number of entries in ppl_header */
	unsigned int pp_size;		/* current total size of partial parity */

	u64 seq;			/* sequence number of this log write */
	struct list_head log_sibling;	/* log->io_list */

	struct list_head stripe_list;	/* stripes added to the io_unit */
	atomic_t pending_stripes;	/* how many stripes not written to raid */
	atomic_t pending_flushes;	/* how many disk flushes are in progress */

	bool submitted;			/* true if write to log started */

	/* inline bio and its biovec for submitting the iounit */
	struct bio bio;
	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};

struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
		       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	/*
	 * Partial parity is the XOR of stripe data chunks that are not changed
	 * during the write request. Depending on available data
	 * (read-modify-write vs. reconstruct-write case) we calculate it
	 * differently.
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		/*
		 * rmw: xor old data and parity from updated disks
		 * This is calculated earlier by ops_run_prexor5() so just copy
		 * the parity dev page.
		 */
		srcs[count++] = sh->dev[pd_idx].page;
	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
		/* rcw: xor data from all not updated disks */
		for (i = disks; i--;) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_UPTODATE, &dev->flags))
				srcs[count++] = dev->page;
		}
	} else {
		return tx;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
			  NULL, sh, (void *) (srcs + sh->disks + 2));

	if (count == 1)
		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
				  &submit);
	else
		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
			       &submit);

	return tx;
}
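
/*
 * The two branches above compute the same partial parity by different
 * routes. A worked identity (illustrative, using the 3-data-disk layout
 * from the header comment): if d0 and d1 are rewritten and d2 is untouched,
 *
 *   pp = d2                             (rcw: xor of not updated disks)
 *      = (d0 ^ d1 ^ d2) ^ d0 ^ d1
 *      = old parity ^ old d0 ^ old d1   (rmw: already accumulated into the
 *                                        parity dev page by ops_run_prexor5(),
 *                                        hence the plain copy)
 */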

static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io;

	io = kmem_cache_alloc(kc, gfp_mask);
	if (!io)
		return NULL;

	io->header_page = alloc_page(gfp_mask);
	if (!io->header_page) {
		kmem_cache_free(kc, io);
		return NULL;
	}

	return io;
}

static void ppl_io_pool_free(void *element, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io = element;

	__free_page(io->header_page);
	kmem_cache_free(kc, io);
}

static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
					  struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_io_unit *io;
	struct ppl_header *pplhdr;
	struct page *header_page;

	io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
	if (!io)
		return NULL;

	header_page = io->header_page;
	memset(io, 0, sizeof(*io));
	io->header_page = header_page;

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	atomic_set(&io->pending_stripes, 0);
	atomic_set(&io->pending_flushes, 0);
	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);

	pplhdr = page_address(io->header_page);
	clear_page(pplhdr);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(ppl_conf->signature);

	io->seq = atomic64_add_return(1, &ppl_conf->seq);
	pplhdr->generation = cpu_to_le64(io->seq);

	return io;
}

static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
	struct ppl_io_unit *io = log->current_io;
	struct ppl_header_entry *e = NULL;
	struct ppl_header *pplhdr;
	int i;
	sector_t data_sector = 0;
	int data_disks = 0;
	struct r5conf *conf = sh->raid_conf;

	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

	/* check if current io_unit is full */
	if (io && (io->pp_size == log->entry_space ||
		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
		pr_debug("%s: add io_unit blocked by seq: %llu\n",
			 __func__, io->seq);
		io = NULL;
	}

	/* add a new unit if there is none or the current is full */
	if (!io) {
		io = ppl_new_iounit(log, sh);
		if (!io)
			return -ENOMEM;
		spin_lock_irq(&log->io_list_lock);
		list_add_tail(&io->log_sibling, &log->io_list);
		spin_unlock_irq(&log->io_list_lock);

		log->current_io = io;
	}

	for (i = 0; i < sh->disks; i++) {
		struct r5dev *dev = &sh->dev[i];

		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
			if (!data_disks || dev->sector < data_sector)
				data_sector = dev->sector;
			data_disks++;
		}
	}
	BUG_ON(!data_disks);

	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
		 io->seq, (unsigned long long)data_sector, data_disks);

	pplhdr = page_address(io->header_page);

	if (io->entries_count > 0) {
		struct ppl_header_entry *last =
				&pplhdr->entries[io->entries_count - 1];
		struct stripe_head *sh_last = list_last_entry(
				&io->stripe_list, struct stripe_head, log_list);
		u64 data_sector_last = le64_to_cpu(last->data_sector);
		u32 data_size_last = le32_to_cpu(last->data_size);

		/*
		 * Check if we can append the stripe to the last entry. It must
		 * be just after the last logged stripe and write to the same
		 * disks. Use bit shift and logarithm to avoid 64-bit division.
		 */
		if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
		    (data_sector >> ilog2(conf->chunk_sectors) ==
		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
		    ((data_sector - data_sector_last) * data_disks ==
		     data_size_last >> 9))
			e = last;
	}

	if (!e) {
		e = &pplhdr->entries[io->entries_count++];
		e->data_sector = cpu_to_le64(data_sector);
		e->parity_disk = cpu_to_le32(sh->pd_idx);
		e->checksum = cpu_to_le32(~0);
	}

	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

	/* don't write any PP if full stripe write */
	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
		le32_add_cpu(&e->pp_size, PAGE_SIZE);
		io->pp_size += PAGE_SIZE;
		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
						    page_address(sh->ppl_page),
						    PAGE_SIZE));
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripes);
	sh->ppl_io = io;

	return 0;
}
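
/*
 * Worked example for the append test in ppl_log_stripe() (illustrative
 * numbers): with 16k chunks, ilog2(conf->chunk_sectors) = ilog2(32) = 5.
 * If the last entry covers one stripe_head with data_sector_last = 8 and
 * data_size_last = 2 * 4k (two data disks), then a stripe_head with
 * data_sector = 16 can be appended: 16 >> 5 == 8 >> 5 (same chunk) and
 * (16 - 8) * 2 == (2 * 4k) >> 9 == 16 (the new data starts exactly where
 * the logged data ends).
 */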

int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_io_unit *io = sh->ppl_io;
	struct ppl_log *log;

	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	log = &ppl_conf->child_logs[sh->pd_idx];

	mutex_lock(&log->io_mutex);

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		mutex_unlock(&log->io_mutex);
		return -EAGAIN;
	}

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	if (ppl_log_stripe(log, sh)) {
		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
	}

	mutex_unlock(&log->io_mutex);

	return 0;
}

static void ppl_log_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct stripe_head *sh, *next;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	if (bio->bi_status)
		md_error(ppl_conf->mddev, log->rdev);

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
		 __func__, io->seq, bio->bi_iter.bi_size,
		 (unsigned long long)bio->bi_iter.bi_sector,
		 bio_devname(bio, b));

	submit_bio(bio);
}

static void ppl_submit_iounit(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_header *pplhdr = page_address(io->header_page);
	struct bio *bio = &io->bio;
	struct stripe_head *sh;
	int i;

	bio->bi_private = io;

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		ppl_log_endio(bio);
		return;
	}

	for (i = 0; i < io->entries_count; i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];

		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
					     ilog2(ppl_conf->block_size >> 9));
		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
	}

	pplhdr->entries_count = cpu_to_le32(io->entries_count);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

	/* Rewind the buffer if current PPL is larger than remaining space */
	if (log->use_multippl &&
	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
		log->next_io_sector = log->rdev->ppl.sector;

	bio->bi_end_io = ppl_log_endio;
	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->next_io_sector;
	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
	bio->bi_write_hint = ppl_conf->write_hint;

	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
	    (unsigned long long)log->next_io_sector);

	if (log->use_multippl)
		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

	WARN_ON(log->disk_flush_bitmap != 0);

	list_for_each_entry(sh, &io->stripe_list, log_list) {
		for (i = 0; i < sh->disks; i++) {
			struct r5dev *dev = &sh->dev[i];

			if ((ppl_conf->child_logs[i].wb_cache_on) &&
			    (test_bit(R5_Wantwrite, &dev->flags))) {
				set_bit(i, &log->disk_flush_bitmap);
			}
		}

		/* entries for full stripe writes have no partial parity */
		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
			continue;

		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
			struct bio *prev = bio;

			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
					       &ppl_conf->bs);
			bio->bi_opf = prev->bi_opf;
			bio->bi_write_hint = prev->bi_write_hint;
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

			bio_chain(bio, prev);
			ppl_submit_iounit_bio(io, prev);
		}
	}

	ppl_submit_iounit_bio(io, bio);
}
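
/*
 * Illustrative sketch, not used by the driver: the rewind check above as a
 * standalone predicate, with worked numbers. The helper name and parameters
 * are hypothetical. Example: with ppl.sector = 0, ppl.size = 256 sectors
 * (128 KiB), next_io_sector = 248 and pp_size = 8k, the io_unit needs
 * (PPL_HEADER_SIZE + 8k) >> 9 = 24 sectors but only 256 - 248 = 8 remain,
 * so the log rewinds to ppl.sector before submitting.
 */
static bool __maybe_unused ppl_doc_needs_rewind(sector_t ppl_start,
						sector_t ppl_size,
						sector_t next_io_sector,
						unsigned int pp_size)
{
	/* remaining space in the PPL area vs. header + partial parity */
	return ppl_start + ppl_size - next_io_sector <
	       ((PPL_HEADER_SIZE + pp_size) >> 9);
}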

static void ppl_submit_current_io(struct ppl_log *log)
{
	struct ppl_io_unit *io;

	spin_lock_irq(&log->io_list_lock);

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);
	if (io && io->submitted)
		io = NULL;

	spin_unlock_irq(&log->io_list_lock);

	if (io) {
		io->submitted = true;

		if (io == log->current_io)
			log->current_io = NULL;

		ppl_submit_iounit(io);
	}
}

void ppl_write_stripe_run(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		log = &ppl_conf->child_logs[i];

		mutex_lock(&log->io_mutex);
		ppl_submit_current_io(log);
		mutex_unlock(&log->io_mutex);
	}
}

static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	unsigned long flags;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	local_irq_save(flags);

	spin_lock(&log->io_list_lock);
	list_del(&io->log_sibling);
	spin_unlock(&log->io_list_lock);

	mempool_free(io, &ppl_conf->io_pool);

	spin_lock(&ppl_conf->no_mem_stripes_lock);
	if (!list_empty(&ppl_conf->no_mem_stripes)) {
		struct stripe_head *sh;

		sh = list_first_entry(&ppl_conf->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&ppl_conf->no_mem_stripes_lock);

	local_irq_restore(flags);

	wake_up(&conf->wait_for_quiescent);
}

static void ppl_flush_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	char b[BDEVNAME_SIZE];

	pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));

	if (bio->bi_status) {
		struct md_rdev *rdev;

		rcu_read_lock();
		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
		if (rdev)
			md_error(rdev->mddev, rdev);
		rcu_read_unlock();
	}

	bio_put(bio);

	if (atomic_dec_and_test(&io->pending_flushes)) {
		ppl_io_unit_finished(io);
		md_wakeup_thread(conf->mddev->thread);
	}
}

static void ppl_do_flush(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	int raid_disks = conf->raid_disks;
	int flushed_disks = 0;
	int i;

	atomic_set(&io->pending_flushes, raid_disks);

	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
		struct md_rdev *rdev;
		struct block_device *bdev = NULL;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags))
			bdev = rdev->bdev;
		rcu_read_unlock();

		if (bdev) {
			struct bio *bio;
			char b[BDEVNAME_SIZE];

			bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
			bio_set_dev(bio, bdev);
			bio->bi_private = io;
			bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			bio->bi_end_io = ppl_flush_endio;

			pr_debug("%s: dev: %s\n", __func__,
				 bio_devname(bio, b));

			submit_bio(bio);
			flushed_disks++;
		}
	}

	log->disk_flush_bitmap = 0;

	for (i = flushed_disks; i < raid_disks; i++) {
		if (atomic_dec_and_test(&io->pending_flushes))
			ppl_io_unit_finished(io);
	}
}

static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
					    struct ppl_log *log)
{
	struct ppl_io_unit *io;

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);

	return !io || !io->submitted;
}

void ppl_quiesce(struct r5conf *conf, int quiesce)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	int i;

	if (quiesce) {
		for (i = 0; i < ppl_conf->count; i++) {
			struct ppl_log *log = &ppl_conf->child_logs[i];

			spin_lock_irq(&log->io_list_lock);
			wait_event_lock_irq(conf->wait_for_quiescent,
					    ppl_no_io_unit_submitted(conf, log),
					    log->io_list_lock);
			spin_unlock_irq(&log->io_list_lock);
		}
	}
}

int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_opf &= ~REQ_PREFLUSH;
	return -EAGAIN;
}

void ppl_stripe_write_finished(struct stripe_head *sh)
{
	struct ppl_io_unit *io;

	io = sh->ppl_io;
	sh->ppl_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripes)) {
		if (io->log->disk_flush_bitmap)
			ppl_do_flush(io);
		else
			ppl_io_unit_finished(io);
	}
}

static void ppl_xor(int size, struct page *page1, struct page *page2)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct page *xor_srcs[] = { page1, page2 };

	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
			  NULL, NULL, NULL, NULL);
	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

	async_tx_quiesce(&tx);
}

/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and read from all of them succeeds.
 *
 * A PPL entry applies to a stripe; partial parity size for an entry is at most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * the size of the chunk. Examples of possible cases for a single entry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * case 0: single data disk write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * data0 data1 data2 ppl parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * +--------+--------+--------+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * | ------ | ------ | ------ | +----+ | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * | ------ | -data- | ------ | | pp | -> | data1 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * | ------ | -data- | ------ | | pp | -> | data1 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * | ------ | ------ | ------ | +----+ | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * +--------+--------+--------+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * pp_size = data_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * case 1: more than one data disk write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * data0 data1 data2 ppl parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * +--------+--------+--------+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * | ------ | ------ | ------ | +----+ | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * | ------ | ------ | ------ | +----+ | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * +--------+--------+--------+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * pp_size = data_size / modified_data_disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * case 2: write to all data disks (also full stripe write):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * data0 data1 data2 parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * +--------+--------+--------+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * | ------ | ------ | ------ | | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * | -data- | -data- | -data- | --------> | xor all data |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * | ------ | ------ | ------ | --------> | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * | ------ | ------ | ------ | | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * +--------+--------+--------+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * pp_size = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * The following cases are possible only in other implementations. The recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * code can handle them, but they are not generated at runtime because they can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * be reduced to cases 0, 1 and 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * data0 data1 data2 ppl parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * +--------+--------+--------+ +----+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * | ------ | -data- | -data- | | pp | | data1 ^ data2 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * | -data- | -data- | -data- | | -- | -> | xor all data |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * | -data- | -data- | ------ | | pp | | data0 ^ data1 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * +--------+--------+--------+ +----+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * pp_size = chunk_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * data0 data1 data2 ppl parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * +--------+--------+--------+ +----+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * | ------ | -data- | ------ | | pp | | data1 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * | ------ | ------ | ------ | | -- | -> | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * | ------ | ------ | ------ | | -- | -> | (no change) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * | -data- | ------ | ------ | | pp | | data0 ^ pp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * +--------+--------+--------+ +----+ +--------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * pp_size = chunk_size
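^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * The recovery code below derives the geometry of an entry from these two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * fields: for cases 0 and 1 (0 < pp_size < chunk_size) it uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * data_disks = data_size / pp_size and strip_sectors = pp_size >> 9; for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * case 2 (pp_size == 0) all data disks were written, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * strip_sectors = (data_size >> 9) / data_disks. For example, case 1 above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * with 4k blocks gives data_size = 2 * 2 * 4k and pp_size = 2 * 4k, hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * data_disks = 2 and strip_sectors = 16.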
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) sector_t ppl_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct ppl_conf *ppl_conf = log->ppl_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct mddev *mddev = ppl_conf->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct r5conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) int block_size = ppl_conf->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct page *page1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct page *page2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) sector_t r_sector_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) sector_t r_sector_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int strip_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int data_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) unsigned int pp_size = le32_to_cpu(e->pp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) unsigned int data_size = le32_to_cpu(e->data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) page1 = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) page2 = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (!page1 || !page2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
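^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* e->data_sector is in units of block_size; convert to 512B sectors */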
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
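^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * Derive the entry geometry (see the cases described above): pp_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * smaller than the chunk size means cases 0 and 1, with pp_size == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * indicating the full stripe write of case 2; pp_size equal to the chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * size covers cases 3 and 4 generated by other implementations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) */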
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if ((pp_size >> 9) < conf->chunk_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (pp_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) data_disks = data_size / pp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) strip_sectors = pp_size >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) data_disks = conf->raid_disks - conf->max_degraded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) strip_sectors = (data_size >> 9) / data_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) r_sector_last = r_sector_first +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) (data_disks - 1) * conf->chunk_sectors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) strip_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) data_disks = conf->raid_disks - conf->max_degraded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) strip_sectors = conf->chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) r_sector_last = r_sector_first + (data_size >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) (unsigned long long)r_sector_first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) (unsigned long long)r_sector_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* if start and end are 4k aligned, use a 4k block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (block_size == 512 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) block_size = RAID5_STRIPE_SIZE(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* iterate through blocks in strip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) bool update_parity = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) sector_t parity_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct md_rdev *parity_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct stripe_head sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) int disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) int indent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) indent += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
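^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* page1 accumulates the xor of the data blocks and partial parity */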
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) memset(page_address(page1), 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* iterate through data member disks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) for (disk = 0; disk < data_disks; disk++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) int dd_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sector_t r_sector = r_sector_first + i +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) (disk * conf->chunk_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) pr_debug("%s:%*s data member disk %d start\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) __func__, indent, "", disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) indent += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (r_sector >= r_sector_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) __func__, indent, "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) (unsigned long long)r_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) indent -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) update_parity = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* map raid sector to member disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) sector = raid5_compute_sector(conf, r_sector, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) &dd_idx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) __func__, indent, "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) (unsigned long long)r_sector, dd_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) (unsigned long long)sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) rdev = conf->disks[dd_idx].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sector >= rdev->recovery_offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) pr_debug("%s:%*s data member disk %d missing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) __func__, indent, "", dd_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) update_parity = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) pr_debug("%s:%*s reading data member disk %s sector %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) __func__, indent, "", bdevname(rdev->bdev, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) (unsigned long long)sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!sync_page_io(rdev, sector, block_size, page2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) REQ_OP_READ, 0, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) pr_debug("%s:%*s read failed!\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) indent, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ppl_xor(block_size, page1, page2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) indent -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!update_parity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (pp_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) pr_debug("%s:%*s reading pp disk sector %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) __func__, indent, "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) (unsigned long long)(ppl_sector + i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!sync_page_io(log->rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ppl_sector - log->rdev->data_offset + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) block_size, page2, REQ_OP_READ, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) pr_debug("%s:%*s read failed!\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) indent, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) md_error(mddev, log->rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ppl_xor(block_size, page1, page2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* map raid sector to parity disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) parity_sector = raid5_compute_sector(conf, r_sector_first + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 0, &disk, &sh);
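^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * The stripe's parity disk must match the one recorded in the entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * and with distributed PPL the partial parity for a stripe is stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * on that same parity drive, which the second BUG_ON below checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */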
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) parity_rdev = conf->disks[sh.pd_idx].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) __func__, indent, "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) (unsigned long long)parity_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) bdevname(parity_rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (!sync_page_io(parity_rdev, parity_sector, block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) page1, REQ_OP_WRITE, 0, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) pr_debug("%s:%*s parity write error!\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) indent, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) md_error(mddev, parity_rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (page1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) __free_page(page1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (page2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) __free_page(page2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) sector_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct ppl_conf *ppl_conf = log->ppl_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct md_rdev *rdev = log->rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct mddev *mddev = rdev->mddev;
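^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* partial parity data for the entries starts right after the header */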
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) sector_t ppl_sector = rdev->ppl.sector + offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) (PPL_HEADER_SIZE >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* iterate through all saved PPL entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct ppl_header_entry *e = &pplhdr->entries[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) u32 pp_size = le32_to_cpu(e->pp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) sector_t sector = ppl_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int ppl_entry_sectors = pp_size >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) u32 crc, crc_stored;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) __func__, rdev->raid_disk, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) (unsigned long long)ppl_sector, pp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
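^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * The entry checksum is ~crc32c(~0, pp data), accumulated below one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * page at a time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) */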
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) crc = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) crc_stored = le32_to_cpu(e->checksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* read partial parity for this entry and calculate its checksum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) while (pp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (!sync_page_io(rdev, sector - rdev->data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) s, page, REQ_OP_READ, 0, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) crc = crc32c_le(crc, page_address(page), s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) pp_size -= s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) sector += s >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) crc = ~crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (crc != crc_stored) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * Don't recover this entry if the checksum does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * match, but keep going and try to recover other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) __func__, crc_stored, crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ppl_conf->mismatch_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ret = ppl_recover_entry(log, e, ppl_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ppl_conf->recovered_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ppl_sector += ppl_entry_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* flush the disk cache after recovery if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static int ppl_write_empty_header(struct ppl_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct ppl_header *pplhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct md_rdev *rdev = log->rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) page = alloc_page(GFP_NOIO | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) pplhdr = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /* zero out PPL space to avoid collision with old PPLs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) log->rdev->ppl.size, GFP_NOIO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
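^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * The checksum covers the whole 4k header; the checksum field itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * is still zero (the page was zero-allocated) when it is computed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */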
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) REQ_FUA, 0, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) md_error(rdev->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static int ppl_load_distributed(struct ppl_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct ppl_conf *ppl_conf = log->ppl_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct md_rdev *rdev = log->rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct mddev *mddev = rdev->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct page *page, *page2, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) u32 crc, crc_stored;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) u32 signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* read PPL headers, find the most recent one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) page2 = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!page2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* search the PPL area for the latest PPL header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!sync_page_io(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) rdev->ppl.sector - rdev->data_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 0, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /* if a header can't be read, don't recover any PPL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) pplhdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) pplhdr = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* check header validity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) crc_stored = le32_to_cpu(pplhdr->checksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) pplhdr->checksum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (crc_stored != crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) __func__, crc_stored, crc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) (unsigned long long)pplhdr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) pplhdr = prev_pplhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) pplhdr_offset = prev_pplhdr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) signature = le32_to_cpu(pplhdr->signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (mddev->external) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * For external metadata the header signature is set and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * validated in userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) ppl_conf->signature = signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) } else if (ppl_conf->signature != signature) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) __func__, signature, ppl_conf->signature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) (unsigned long long)pplhdr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) pplhdr = prev_pplhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) pplhdr_offset = prev_pplhdr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) le64_to_cpu(pplhdr->generation)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* the previous header was the newest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) pplhdr = prev_pplhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) pplhdr_offset = prev_pplhdr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) prev_pplhdr_offset = pplhdr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) prev_pplhdr = pplhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) tmp = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) page = page2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) page2 = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * Calculate the next potential ppl offset: the next header, if any,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * follows this header and the partial parity data of all its entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) pplhdr_offset +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) pplhdr_offset += PPL_HEADER_SIZE >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* no valid ppl found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!pplhdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ppl_conf->mismatch_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) __func__, (unsigned long long)pplhdr_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) le64_to_cpu(pplhdr->generation));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* attempt to recover from log if we are starting a dirty array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ret = ppl_recover(log, pplhdr, pplhdr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* write empty header if we are starting the array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (!ret && !mddev->pers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ret = ppl_write_empty_header(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) __free_page(page2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) __func__, ret, ppl_conf->mismatch_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ppl_conf->recovered_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static int ppl_load(struct ppl_conf *ppl_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) u32 signature = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) bool signature_set = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) for (i = 0; i < ppl_conf->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct ppl_log *log = &ppl_conf->child_logs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* skip missing drive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (!log->rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ret = ppl_load_distributed(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * For external metadata we can't check if the signature is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * correct on a single drive, but we can check if it is the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * on all drives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (ppl_conf->mddev->external) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (!signature_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) signature = ppl_conf->signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) signature_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) } else if (signature != ppl_conf->signature) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) mdname(ppl_conf->mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) __func__, ret, ppl_conf->mismatch_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ppl_conf->recovered_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void __ppl_exit_log(struct ppl_conf *ppl_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) kfree(ppl_conf->child_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) bioset_exit(&ppl_conf->bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) bioset_exit(&ppl_conf->flush_bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) mempool_exit(&ppl_conf->io_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) kmem_cache_destroy(ppl_conf->io_kc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) kfree(ppl_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) void ppl_exit_log(struct r5conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct ppl_conf *ppl_conf = conf->log_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (ppl_conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) __ppl_exit_log(ppl_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) conf->log_private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static int ppl_validate_rdev(struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) int ppl_data_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) int ppl_size_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * The configured PPL size must be enough to store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * the header and (at the very least) partial parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * for one stripe. Round it down to ensure the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * space is cleanly divisible by stripe size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (ppl_data_sectors > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ppl_data_sectors = rounddown(ppl_data_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (ppl_data_sectors <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) pr_warn("md/raid:%s: PPL space too small on %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) mdname(rdev->mddev), bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if ((rdev->ppl.sector < rdev->data_offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) (rdev->ppl.sector >= rdev->data_offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mdname(rdev->mddev), bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!rdev->mddev->external &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) mdname(rdev->mddev), bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) rdev->ppl.size = ppl_size_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
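^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * If the PPL area can hold at least two full PPLs (header plus entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * space each), write consecutive PPLs at increasing offsets within it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * otherwise a single PPL at a fixed location is reused for every write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) */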
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) PPL_HEADER_SIZE) * 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) log->use_multippl = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) set_bit(MD_HAS_MULTIPLE_PPLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) &log->ppl_conf->mddev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) log->entry_space = PPL_SPACE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) log->use_multippl = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) log->entry_space = (log->rdev->ppl.size << 9) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) PPL_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) log->next_io_sector = rdev->ppl.sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) q = bdev_get_queue(rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) log->wb_cache_on = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) int ppl_init_log(struct r5conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct ppl_conf *ppl_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct mddev *mddev = conf->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) int max_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) mdname(conf->mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
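^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /* the 4k PPL header is handled as a single page */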
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (PAGE_SIZE != 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (mddev->level != 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) mdname(mddev), mddev->level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) pr_warn("md/raid:%s PPL is not compatible with journal\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
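^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /* disk_flush_bitmap holds one bit per member disk */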
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) BITS_PER_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (conf->raid_disks > max_disks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) mdname(mddev), max_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (!ppl_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) ppl_conf->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!ppl_conf->io_kc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) ppl_io_pool_free, ppl_conf->io_kc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ppl_conf->count = conf->raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!ppl_conf->child_logs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) atomic64_set(&ppl_conf->seq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) spin_lock_init(&ppl_conf->no_mem_stripes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (!mddev->external) {
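^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /* for native metadata the signature is derived from the array uuid */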
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ppl_conf->block_size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) ppl_conf->block_size = queue_logical_block_size(mddev->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) for (i = 0; i < ppl_conf->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct ppl_log *log = &ppl_conf->child_logs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct md_rdev *rdev = conf->disks[i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) mutex_init(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) spin_lock_init(&log->io_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) INIT_LIST_HEAD(&log->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) log->ppl_conf = ppl_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) log->rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ret = ppl_validate_rdev(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ppl_init_child_log(log, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* load and possibly recover the logs from the member disks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ret = ppl_load(ppl_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) } else if (!mddev->pers && mddev->recovery_cp == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) ppl_conf->recovered_entries > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ppl_conf->mismatch_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * If we are starting a dirty array and the recovery succeeds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * without any issues, set the array as clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) mddev->recovery_cp = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /* no mismatch allowed when enabling PPL for a running array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) conf->log_private = ppl_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) __ppl_exit_log(ppl_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct ppl_conf *ppl_conf = conf->log_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct ppl_log *log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) pr_debug("%s: disk: %d operation: %s dev: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) __func__, rdev->raid_disk, add ? "add" : "remove",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (rdev->raid_disk < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (rdev->raid_disk >= ppl_conf->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) log = &ppl_conf->child_logs[rdev->raid_disk];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) mutex_lock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (add) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ret = ppl_validate_rdev(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) log->rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) ret = ppl_write_empty_header(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) ppl_init_child_log(log, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) log->rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) mutex_unlock(&log->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ppl_write_hint_show(struct mddev *mddev, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) size_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) struct r5conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct ppl_conf *ppl_conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) spin_lock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (conf && raid5_has_ppl(conf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) ppl_conf = conf->log_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) spin_unlock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct r5conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct ppl_conf *ppl_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) unsigned short new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (len >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (kstrtou16(page, 10, &new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) err = mddev_lock(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (!conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) } else if (raid5_has_ppl(conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) ppl_conf = conf->log_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (!ppl_conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) ppl_conf->write_hint = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) mddev_unlock(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return err ?: len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct md_sysfs_entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) ppl_write_hint_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ppl_write_hint_store);