// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...) \
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);

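/*
 * Try to record the sector range of @r1_bio in @rdev's serialization tree
 * for bucket @idx.  Returns 0 on success, or -EBUSY if an overlapping write
 * is already in flight, in which case the caller must wait and retry.
 */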
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
				struct serial_info *si, int idx)
{
	unsigned long flags;
	int ret = 0;
	sector_t lo = r1_bio->sector;
	sector_t hi = lo + r1_bio->sectors;
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	/* collision happened */
	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
		ret = -EBUSY;
	else {
		si->start = lo;
		si->last = hi;
		raid1_rb_insert(si, &serial->serial_rb);
	}
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	return ret;
}

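/*
 * Allocate a serial_info entry and block until the sector range of @r1_bio
 * can be inserted into @rdev's serialization tree, i.e. until no overlapping
 * write is pending.
 */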
static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	struct mddev *mddev = rdev->mddev;
	struct serial_info *si;
	int idx = sector_to_idx(r1_bio->sector);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	if (WARN_ON(!mddev->serial_info_pool))
		return;
	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
	wait_event(serial->serial_io_wait,
		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}

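/*
 * Drop the [lo, hi] range from @rdev's serialization tree once the write has
 * completed, and wake up any writers waiting for the range to clear.
 */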
static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct serial_info *si;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;
	int idx = sector_to_idx(lo);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
	     si; si = raid1_rb_iter_next(si, lo, hi)) {
		if (si->start == lo && si->last == hi) {
			raid1_rb_remove(si, &serial->serial_rb);
			mempool_free(si, mddev->serial_info_pool);
			found = 1;
			break;
		}
	}
	if (!found)
		WARN(1, "The write IO is not recorded for serialization\n");
	spin_unlock_irqrestore(&serial->serial_lock, flags);
	wake_up(&serial->serial_io_wait);
}

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

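/*
 * Allocate an r1bio plus the bios and resync_pages needed to carry a
 * resync/recovery request: one bio per raid disk, with data pages attached
 * to the first bio only (or to every bio for a user-requested check/repair).
 */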
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}

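/*
 * Free an r1bio obtained from r1buf_pool_alloc(): release the resync pages,
 * drop every per-disk bio and the shared resync_pages array, then free the
 * r1bio itself.
 */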
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}

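/*
 * Release every per-mirror bio attached to @r1_bio (skipping the special
 * marker values used instead of real bios) and clear the slots.
 */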
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

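/*
 * Return a normal-I/O r1bio to its mempool after dropping its bios.
 */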
static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}

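/*
 * Return a resync r1bio to the r1buf pool: drop the rdev reference taken for
 * each active bio and lower the resync barrier for its sector range.
 */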
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

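/*
 * Queue @r1_bio on conf->retry_list so the raid1d thread can retry or
 * complete it later, and wake both the barrier waiters and the md thread.
 */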
static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle. All I/Os, even write-behind writes, are done.
	 */
	allow_barrier(conf, r1_bio->sector);

	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

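/*
 * Completion handler for a read issued to one mirror.  On success the
 * request is completed; on failure it is either completed with an error
 * (when no other mirror could service it) or marked R1BIO_ReadError and
 * handed to raid1d so the read can be retried on another mirror.
 */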
static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

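/*
 * Final bookkeeping for a completed write request: free any write-behind
 * pages, tell the bitmap that the write has ended, and drop the mddev write
 * count.
 */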
static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

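/*
 * Called once per finished mirror write; when the last one drops the
 * 'remaining' count to zero, either complete the request or hand it to
 * raid1d for write-error / bad-block handling.
 */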
static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

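/*
 * Completion handler for a write issued to one mirror.  It records write
 * errors (possibly marking the rdev as wanting replacement or failing it),
 * handles write-behind accounting, clears serialization entries, and finally
 * lets r1_bio_write_done() decide whether the whole request is finished.
 */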
static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;
	sector_t lo = r1_bio->sector;
	sector_t hi = r1_bio->sector + r1_bio->sectors;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
		}

		/*
		 * When the device is faulty, it is not necessary to
		 * handle write error.
		 */
		if (!test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_WriteError, &r1_bio->state);
		else {
			/* Fail the request */
			set_bit(R1BIO_Degraded, &r1_bio->state);
			/* Finished with this branch */
			r1_bio->bios[mirror] = NULL;
			to_put = bio;
		}
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(CollisionCheck, &rdev->flags))
			remove_serial(rdev, lo, hi);
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	} else if (rdev->mddev->serialize_policy)
		remove_serial(rdev, lo, hi);
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

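/*
 * Clamp a request so it does not cross a BARRIER_UNIT_SECTOR_SIZE boundary:
 * return how many of @sectors starting at @start_sector fit inside the
 * barrier unit that @start_sector belongs to.
 */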
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because the position is tracked per mirror, not per device.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use. It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If the buffered sequential IO size exceeds the
			 * optimal iosize, check whether there is an idle
			 * disk. If yes, choose the idle disk. read_balance
			 * could already have chosen an idle disk before
			 * noticing it's a sequential IO on this disk. This
			 * doesn't matter because this disk will idle, and
			 * next time it will be utilized after the first
			 * disk's IO size exceeds the optimal iosize. In this
			 * way, the iosize of the first disk will be at least
			 * the optimal iosize. The iosize of the second disk
			 * might be small, but that is not a big deal since
			 * when the second disk starts IO, the first disk is
			 * likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with the fewest pending requests,
	 * even if that disk is rotational, which might or might not be optimal
	 * for arrays with mixed rotational/non-rotational disks depending on
	 * the workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}

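/*
 * Submit a chain of queued write bios after flushing any pending bitmap
 * updates.  Bios destined for Faulty devices are failed, discards to devices
 * without discard support are silently completed, and everything else is
 * passed down with submit_bio_noacct().
 */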
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	md_bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_disk;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bio->bi_disk->queue)))
			/* Just ignore it */
			bio_endio(bio);
		else
			submit_bio_noacct(bio);
		bio = next;
		cond_resched();
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static void flush_pending_writes(struct r1conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /* Any writes that have been queued but are awaiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * bitmap updates get flushed here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (conf->pending_bio_list.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) bio = bio_list_get(&conf->pending_bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) conf->pending_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * As this is called in a wait_event() loop (see freeze_array),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * current->state might be TASK_UNINTERRUPTIBLE which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * cause a warning when we prepare to wait again. As it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * rare that this path is taken, it is perfectly safe to force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * us to go around the wait_event() loop again, so the warning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * is a false-positive. Silence the warning by resetting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * thread state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) flush_bio_list(conf, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* Barriers....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * Sometimes we need to suspend IO while we do something else,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * either some resync/recovery, or reconfigure the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * To do this we raise a 'barrier'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * The 'barrier' is a counter that can be raised multiple times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * to count how many activities are happening which preclude
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * normal IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * We can only raise the barrier if there is no pending IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * i.e. if nr_pending == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * We choose only to raise the barrier if no-one is waiting for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * barrier to go down. This means that as soon as an IO request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * is ready, no other operations which require a barrier will start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * until the IO request has had a chance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * So: regular IO calls 'wait_barrier'. When that returns there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * is no background IO happening. It must arrange to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * allow_barrier when it has finished its IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * Background IO calls must call raise_barrier. Once that returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * there is no normal IO happening. It must arrange to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * lower_barrier when the particular background IO completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * If resync/recovery is interrupted, returns -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * Otherwise, returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) */
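/*
 * A minimal usage sketch (illustrative only; the real call sites live in the
 * resync and request paths of this driver):
 *
 *	// resync/recovery context
 *	if (raise_barrier(conf, sector_nr) == 0) {
 *		... issue sync I/O for this barrier bucket ...
 *		lower_barrier(conf, sector_nr);
 *	}
 *
 *	// regular I/O context
 *	wait_barrier(conf, sector_nr);
 *	... submit the regular I/O ...
 *	allow_barrier(conf, sector_nr);
 */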
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) int idx = sector_to_idx(sector_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* Wait until no block IO is waiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) wait_event_lock_irq(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) !atomic_read(&conf->nr_waiting[idx]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* block any new IO from starting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) atomic_inc(&conf->barrier[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * In raise_barrier() we firstly increase conf->barrier[idx] then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * check conf->nr_pending[idx]. In _wait_barrier() we firstly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * increase conf->nr_pending[idx] then check conf->barrier[idx].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * A memory barrier is needed here to make sure conf->nr_pending[idx] won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * be fetched before conf->barrier[idx] is increased. Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * there will be a race between raise_barrier() and _wait_barrier().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /* For these conditions we must wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * A: while the array is in the frozen state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) *    exists in the corresponding I/O barrier bucket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) *    resync count allowed on the current I/O barrier bucket is reached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) wait_event_lock_irq(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) (!conf->array_frozen &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) !atomic_read(&conf->nr_pending[idx]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) atomic_dec(&conf->barrier[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) atomic_inc(&conf->nr_sync_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int idx = sector_to_idx(sector_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) atomic_dec(&conf->barrier[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) atomic_dec(&conf->nr_sync_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void _wait_barrier(struct r1conf *conf, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * We need to increase conf->nr_pending[idx] very early here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * so that raise_barrier() can be blocked when it waits for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * conf->nr_pending[idx] to be 0. Then we can avoid holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * conf->resync_lock when there is no barrier raised in the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * barrier unit bucket. Also, if the array is frozen, I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * should be blocked until the array is unfrozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) atomic_inc(&conf->nr_pending[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * check conf->barrier[idx]. In raise_barrier() we firstly increase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * barrier is necessary here to make sure conf->barrier[idx] won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * fetched before conf->nr_pending[idx] is increased. Otherwise there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * will be a race between _wait_barrier() and raise_barrier().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * Don't worry about checking two atomic_t variables at the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * time here. If, while we check conf->barrier[idx], the array is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * 0, it is safe to return and let the I/O continue. Because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * array is frozen, all I/O returned here will eventually complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * or be queued, so no race will happen. See the code comment in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * freeze_array().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (!READ_ONCE(conf->array_frozen) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) !atomic_read(&conf->barrier[idx]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * After holding conf->resync_lock, conf->nr_pending[idx]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * should be decreased before waiting for the barrier to drop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * Otherwise, we may encounter a race condition because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * raise_barrier() might be waiting for conf->nr_pending[idx]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * to be 0 at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) atomic_inc(&conf->nr_waiting[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) atomic_dec(&conf->nr_pending[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * In case freeze_array() is waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * get_unqueued_pending() == extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* Wait for the barrier in the same barrier unit bucket to drop. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) wait_event_lock_irq(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) !conf->array_frozen &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) !atomic_read(&conf->barrier[idx]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) atomic_inc(&conf->nr_pending[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) atomic_dec(&conf->nr_waiting[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int idx = sector_to_idx(sector_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * Very similar to _wait_barrier(). The difference is that for read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * I/O we don't need to wait for sync I/O, but if the whole array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * is frozen, the read I/O still has to wait until the array is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * unfrozen. Since there is no ordering requirement with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * conf->barrier[idx] here, a memory barrier is unnecessary as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) atomic_inc(&conf->nr_pending[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!READ_ONCE(conf->array_frozen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) atomic_inc(&conf->nr_waiting[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) atomic_dec(&conf->nr_pending[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * In case freeze_array() is waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * get_unqueued_pending() == extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* Wait for array to be unfrozen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) wait_event_lock_irq(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) !conf->array_frozen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) atomic_inc(&conf->nr_pending[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) atomic_dec(&conf->nr_waiting[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int idx = sector_to_idx(sector_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) _wait_barrier(conf, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static void _allow_barrier(struct r1conf *conf, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) atomic_dec(&conf->nr_pending[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int idx = sector_to_idx(sector_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) _allow_barrier(conf, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* conf->resync_lock should be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static int get_unqueued_pending(struct r1conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int idx, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ret = atomic_read(&conf->nr_sync_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ret += atomic_read(&conf->nr_pending[idx]) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) atomic_read(&conf->nr_queued[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static void freeze_array(struct r1conf *conf, int extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* Stop sync I/O and normal I/O and wait for everything to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * go quiet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * This is called in two situations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * 1) management command handlers (reshape, remove disk, quiesce).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * 2) one normal I/O request failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * After array_frozen is set to 1, new sync IO will be blocked at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * or wait_read_barrier(). The flying I/Os will either complete or be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * queued. When everything goes quiet, there are only queued I/Os left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * barrier bucket index which this I/O request hits. When all sync and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * normal I/O are queued, sum of all conf->nr_pending[] will match sum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * of all conf->nr_queued[]. But normal I/O failure is an exception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * in handle_read_error(), we may call freeze_array() before trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * fix the read error. In this case, the error read I/O is not queued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * so get_unqueued_pending() == 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * Therefore before this function returns, we need to wait until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * get_unqueued_pending(conf) becomes equal to extra. For the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * normal I/O context, extra is 1; in all other situations extra is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) conf->array_frozen = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) raid1_log(conf->mddev, "wait freeze");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) wait_event_lock_irq_cmd(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) get_unqueued_pending(conf) == extra,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) conf->resync_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) flush_pending_writes(conf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void unfreeze_array(struct r1conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* reverse the effect of the freeze */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) conf->array_frozen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
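
/*
 * Illustrative pairing (a sketch; the actual callers are the error-handling
 * and management paths): code that must run with no r1 I/O in flight
 * brackets its work with freeze_array(conf, extra) and unfreeze_array(conf),
 * where extra is 1 when called from a failed-I/O context and 0 otherwise.
 */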
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
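/*
 * Build a private copy of the write payload for write-behind: allocate up to
 * vcnt pages, copy the data in, and hang the copy off
 * r1_bio->behind_master_bio so the original bio can be completed before the
 * write-mostly mirrors finish. If page allocation fails, the pages are freed
 * and the request falls back to ordinary (non-behind) writes.
 */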
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static void alloc_behind_master_bio(struct r1bio *r1_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) int size = bio->bi_iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) struct bio *behind_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (!behind_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /* discard op, we don't support writezero/writesame yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (!bio_has_data(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) behind_bio->bi_iter.bi_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) goto skip_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) behind_bio->bi_write_hint = bio->bi_write_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) while (i < vcnt && size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) int len = min_t(int, PAGE_SIZE, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) page = alloc_page(GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (unlikely(!page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) goto free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) bio_add_page(behind_bio, page, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) size -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) bio_copy_data(behind_bio, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) skip_copy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) r1_bio->behind_master_bio = behind_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) set_bit(R1BIO_BehindIO, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) free_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) pr_debug("%dB behind alloc failed, doing sync I/O\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) bio->bi_iter.bi_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) bio_free_pages(behind_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) bio_put(behind_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
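/*
 * Per-plug context: write bios gathered while the submitting task holds a
 * blk_plug, together with their count, to be flushed in raid1_unplug().
 */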
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct raid1_plug_cb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct blk_plug_cb cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct bio_list pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int pending_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
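/*
 * Plug release callback. When called from process context the gathered
 * writes are issued directly via flush_bio_list(); when called from
 * schedule() or while another bio list is being processed, they are moved
 * to conf->pending_bio_list for raid1d to submit.
 */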
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct mddev *mddev = plug->cb.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (from_schedule || current->bio_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) bio_list_merge(&conf->pending_bio_list, &plug->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) conf->pending_count += plug->pending_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) kfree(plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* we aren't scheduling, so we can do the write-out directly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) bio = bio_list_get(&plug->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) flush_bio_list(conf, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) kfree(plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) r1_bio->master_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) r1_bio->sectors = bio_sectors(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) r1_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) r1_bio->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) r1_bio->sector = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static inline struct r1bio *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) alloc_r1bio(struct mddev *mddev, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct r1bio *r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* Ensure no bio records IO_BLOCKED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) init_r1bio(r1_bio, mddev, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
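/*
 * Service a READ: wait out any barrier/freeze, pick a mirror with
 * read_balance(), split the bio if only part of it can be served from that
 * mirror, then clone and submit the read. A non-NULL r1_bio means this is
 * typically a retry coming from the error-handling path rather than a fresh
 * request.
 */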
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static void raid1_read_request(struct mddev *mddev, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int max_read_sectors, struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct raid1_info *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct bio *read_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) const int op = bio_op(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) int max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) int rdisk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) bool print_msg = !!r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * If r1_bio is set, we are blocking the raid1d thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * so there is a tiny risk of deadlock. So ask for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * emergency memory if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (print_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* Need to get the block device name carefully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) bdevname(rdev->bdev, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) strcpy(b, "???");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * Still need the barrier for READ in case the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * array is frozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) wait_read_barrier(conf, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (!r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) r1_bio = alloc_r1bio(mddev, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) init_r1bio(r1_bio, mddev, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) r1_bio->sectors = max_read_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * make_request() can abort the operation when read-ahead is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * used and no empty request is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) rdisk = read_balance(conf, r1_bio, &max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (rdisk < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* couldn't find anywhere to read from */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (print_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) mdname(mddev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) (unsigned long long)r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) raid_end_bio_io(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) mirror = conf->mirrors + rdisk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (print_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) mdname(mddev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) (unsigned long long)r1_bio->sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) bdevname(mirror->rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (test_bit(WriteMostly, &mirror->rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * Reading from a write-mostly device must take care not to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * over-take any writes that are 'behind'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) raid1_log(mddev, "wait behind writes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) wait_event(bitmap->behind_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) atomic_read(&bitmap->behind_writes) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (max_sectors < bio_sectors(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct bio *split = bio_split(bio, max_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) gfp, &conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) bio_chain(split, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) bio = split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) r1_bio->master_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) r1_bio->sectors = max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) r1_bio->read_disk = rdisk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) r1_bio->bios[rdisk] = read_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) read_bio->bi_iter.bi_sector = r1_bio->sector +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) mirror->rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) bio_set_dev(read_bio, mirror->rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) read_bio->bi_end_io = raid1_end_read_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) bio_set_op_attrs(read_bio, op, do_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (test_bit(FailFast, &mirror->rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) test_bit(R1BIO_FailFast, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) read_bio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) read_bio->bi_private = r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) disk_devt(mddev->gendisk), r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) submit_bio_noacct(read_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
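/*
 * Service a WRITE: wait for any resync barrier (and, on clustered arrays,
 * for remote resync of this range), choose every usable mirror while
 * skipping or trimming around known bad blocks, optionally set up
 * write-behind, then clone the bio once per chosen mirror and queue the
 * clones through the plug or conf->pending_bio_list.
 */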
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static void raid1_write_request(struct mddev *mddev, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int max_write_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct r1bio *r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) int i, disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct md_rdev *blocked_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct blk_plug_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct raid1_plug_cb *plug = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) int first_clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) int max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (mddev_is_clustered(mddev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) md_cluster_ops->area_resyncing(mddev, WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) bio->bi_iter.bi_sector, bio_end_sector(bio))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) DEFINE_WAIT(w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) prepare_to_wait(&conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) &w, TASK_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (!md_cluster_ops->area_resyncing(mddev, WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) bio->bi_iter.bi_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) bio_end_sector(bio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) finish_wait(&conf->wait_barrier, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * Register the new request and wait if the reconstruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * thread has put up a barrier for new requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * Continue immediately if no resync is active currently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) wait_barrier(conf, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) r1_bio = alloc_r1bio(mddev, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) r1_bio->sectors = max_write_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (conf->pending_count >= max_queued_requests) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) raid1_log(mddev, "wait queued");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) wait_event(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) conf->pending_count < max_queued_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /* First select target devices under the rcu lock and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * increment the refcount on their rdev. Record them by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * bios[x] to bio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * If there are known/acknowledged bad blocks on any device on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * which we have seen a write error, we want to avoid writing those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * This potentially requires several writes to write around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * the bad blocks. Each set of writes gets its own r1bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * with a set of bios attached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) disks = conf->raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) retry_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) blocked_rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) max_sectors = r1_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) for (i = 0; i < disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) blocked_rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) r1_bio->bios[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (!rdev || test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (i < conf->raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) set_bit(R1BIO_Degraded, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (test_bit(WriteErrorSeen, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) int is_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) &first_bad, &bad_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (is_bad < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /* mustn't write here until the bad block is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * acknowledged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) set_bit(BlockedBadBlocks, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) blocked_rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (is_bad && first_bad <= r1_bio->sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /* Cannot write here at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) bad_sectors -= (r1_bio->sector - first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (bad_sectors < max_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /* mustn't write more than bad_sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * to other devices yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) max_sectors = bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* We don't set R1BIO_Degraded as that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * only applies if the disk is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * missing, so it might be re-added,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * and we want to know that this chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * needs to be recovered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * In this case the device is here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * and the fact that this chunk is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * in-sync is recorded in the bad
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * block log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (is_bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) int good_sectors = first_bad - r1_bio->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (good_sectors < max_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) max_sectors = good_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) r1_bio->bios[i] = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (unlikely(blocked_rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /* Wait for this device to become unblocked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (r1_bio->bios[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) rdev_dec_pending(conf->mirrors[j].rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) r1_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) allow_barrier(conf, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) md_wait_for_blocked_rdev(blocked_rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) wait_barrier(conf, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) goto retry_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (max_sectors < bio_sectors(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct bio *split = bio_split(bio, max_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) GFP_NOIO, &conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) bio_chain(split, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) bio = split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) r1_bio->master_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) r1_bio->sectors = max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) atomic_set(&r1_bio->remaining, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) atomic_set(&r1_bio->behind_remaining, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) first_clone = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) for (i = 0; i < disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct bio *mbio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct md_rdev *rdev = conf->mirrors[i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (!r1_bio->bios[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (first_clone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* Do behind I/O?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * Not if there are too many, or we cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * allocate memory, or a reader on a WriteMostly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * device is waiting for behind writes to flush. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (bitmap &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) (atomic_read(&bitmap->behind_writes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) < mddev->bitmap_info.max_write_behind) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) !waitqueue_active(&bitmap->behind_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) alloc_behind_master_bio(r1_bio, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) test_bit(R1BIO_BehindIO, &r1_bio->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) first_clone = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (r1_bio->behind_master_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) mbio = bio_clone_fast(r1_bio->behind_master_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) GFP_NOIO, &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (r1_bio->behind_master_bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (test_bit(CollisionCheck, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) wait_for_serialization(rdev, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (test_bit(WriteMostly, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) atomic_inc(&r1_bio->behind_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) } else if (mddev->serialize_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) wait_for_serialization(rdev, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) r1_bio->bios[i] = mbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) mbio->bi_iter.bi_sector = (r1_bio->sector +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) conf->mirrors[i].rdev->data_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) mbio->bi_end_io = raid1_end_write_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) conf->raid_disks - mddev->degraded > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) mbio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) mbio->bi_private = r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) atomic_inc(&r1_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) trace_block_bio_remap(mbio->bi_disk->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) mbio, disk_devt(mddev->gendisk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* flush_pending_writes() needs access to the rdev so...*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) mbio->bi_disk = (void *)conf->mirrors[i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) plug = container_of(cb, struct raid1_plug_cb, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) plug = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (plug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) bio_list_add(&plug->pending, mbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) plug->pending_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) bio_list_add(&conf->pending_bio_list, mbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) conf->pending_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) r1_bio_write_done(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /* In case raid1d snuck in to freeze_array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
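/*
 * Top-level entry for all array I/O: let md handle PREFLUSH requests,
 * compute the largest chunk that stays within one barrier unit, then hand
 * the bio with that limit to the read or write path above.
 */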
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) sector_t sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (unlikely(bio->bi_opf & REQ_PREFLUSH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) && md_flush_request(mddev, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * There is a limit to the maximum size, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * the read/write handler might find a lower limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * due to bad blocks. To avoid multiple splits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) * we pass the maximum number of sectors down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * and let the lower level perform the split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) sectors = align_to_barrier_unit_end(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) bio->bi_iter.bi_sector, bio_sectors(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (bio_data_dir(bio) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) raid1_read_request(mddev, bio, sectors, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (!md_write_start(mddev, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) raid1_write_request(mddev, bio, sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static void raid1_status(struct seq_file *seq, struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
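/*
 * Emit the "[n/m] [UU_]" portion of the mdstat line: total vs. in-sync
 * disks, then one character per slot ('U' if in sync, '_' otherwise).
 */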
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) seq_printf(seq, " [%d/%d] [", conf->raid_disks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) conf->raid_disks - mddev->degraded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) for (i = 0; i < conf->raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) seq_printf(seq, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) seq_printf(seq, "]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * If it is not operational, then we have already marked it as dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * else if it is the last working disk with "fail_last_dev == false",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * ignore the error and let the next level up know;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * else mark the drive as failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) && (conf->raid_disks - mddev->degraded) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * Don't fail the drive, act as though we were just a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * normal single drive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * However, don't try a recovery from this drive as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) * it is very likely to fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) conf->recovery_disabled = mddev->recovery_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
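/*
 * Otherwise record the failure: block further I/O to this rdev, bump
 * the degraded count, mark the device Faulty, interrupt any running
 * recovery and request a superblock update.
 */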
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) set_bit(Blocked, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (test_and_clear_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) mddev->degraded++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) set_bit(Faulty, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * if recovery is running, make sure it aborts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) set_bit(MD_RECOVERY_INTR, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) set_mask_bits(&mddev->sb_flags, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) "md/raid1:%s: Operation continuing on %d devices.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) mdname(mddev), bdevname(rdev->bdev, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) mdname(mddev), conf->raid_disks - mddev->degraded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static void print_conf(struct r1conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) pr_debug("RAID1 conf printout:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (!conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) pr_debug("(!conf)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) conf->raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) for (i = 0; i < conf->raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) i, !test_bit(In_sync, &rdev->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) !test_bit(Faulty, &rdev->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) bdevname(rdev->bdev,b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) static void close_sync(struct r1conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
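/*
 * Pass the barrier in every bucket once (wait, then release) so that
 * any sync request still holding a barrier has finished before the
 * resync buffer pool is freed.
 */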
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) _wait_barrier(conf, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) _allow_barrier(conf, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) mempool_exit(&conf->r1buf_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) static int raid1_spare_active(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * Find all failed disks within the RAID1 configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) * and mark them readable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * Called under mddev lock, so rcu protection not needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * device_lock used to avoid races with raid1_end_read_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * which expects 'In_sync' flags and ->degraded to be consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) for (i = 0; i < conf->raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct md_rdev *rdev = conf->mirrors[i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (repl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) && !test_bit(Candidate, &repl->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) && repl->recovery_offset == MaxSector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) && !test_bit(Faulty, &repl->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) && !test_and_set_bit(In_sync, &repl->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) /* replacement has just become active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (!rdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) !test_and_clear_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* Replaced device not technically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * faulty, but we need to be sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * it gets removed and never re-added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) set_bit(Faulty, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) sysfs_notify_dirent_safe(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) rdev->sysfs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (rdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) && rdev->recovery_offset == MaxSector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) && !test_bit(Faulty, &rdev->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) && !test_and_set_bit(In_sync, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) sysfs_notify_dirent_safe(rdev->sysfs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) mddev->degraded -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) int err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) int mirror = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) struct raid1_info *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) int first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) int last = conf->raid_disks - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (mddev->recovery_disabled == conf->recovery_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (md_integrity_add_rdev(rdev, mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (rdev->raid_disk >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) first = last = rdev->raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * find the disk ... but prefer rdev->saved_raid_disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (rdev->saved_raid_disk >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) rdev->saved_raid_disk >= first &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) rdev->saved_raid_disk < conf->raid_disks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) first = last = rdev->saved_raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
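/*
 * Walk the candidate slots: an empty slot takes the new disk as a
 * regular member, while a slot whose member wants a replacement takes
 * it as that member's replacement (stored at raid_disks + slot).
 */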
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) for (mirror = first; mirror <= last; mirror++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) p = conf->mirrors + mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (!p->rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) disk_stack_limits(mddev->gendisk, rdev->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) rdev->data_offset << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) p->head_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) rdev->raid_disk = mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /* As all devices are equivalent, we don't need a full recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * if this disk was recently a member of the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (rdev->saved_raid_disk < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) conf->fullsync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) rcu_assign_pointer(p->rdev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (test_bit(WantReplacement, &p->rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) p[conf->raid_disks].rdev == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /* Add this device as a replacement */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) clear_bit(In_sync, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) set_bit(Replacement, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) rdev->raid_disk = mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) conf->fullsync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int number = rdev->raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) struct raid1_info *p = conf->mirrors + number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (rdev != p->rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) p = conf->mirrors + conf->raid_disks + number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (rdev == p->rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (test_bit(In_sync, &rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) atomic_read(&rdev->nr_pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /* Only remove non-faulty devices if recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * is not possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (!test_bit(Faulty, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) mddev->recovery_disabled != conf->recovery_disabled &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) mddev->degraded < conf->raid_disks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) p->rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (!test_bit(RemoveSynchronized, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (atomic_read(&rdev->nr_pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) /* lost the race, try later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) p->rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (conf->mirrors[conf->raid_disks + number].rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) /* We just removed a device that is being replaced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * Move down the replacement. We drain all IO before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * doing this to avoid confusion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) struct md_rdev *repl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) conf->mirrors[conf->raid_disks + number].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) freeze_array(conf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (atomic_read(&repl->nr_pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) /* Some queued IO on the retry_list still holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * a reference on repl, so we cannot clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * replacement slot yet; doing so could cause an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * rdev NULL pointer dereference in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * sync_request_write and handle_write_finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) unfreeze_array(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) clear_bit(Replacement, &repl->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) p->rdev = repl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) conf->mirrors[conf->raid_disks + number].rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) unfreeze_array(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) clear_bit(WantReplacement, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) err = md_integrity_register(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) static void end_sync_read(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct r1bio *r1_bio = get_resync_r1bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) update_head_pos(r1_bio->read_disk, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * we have read a block, now it needs to be re-written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * or re-read if the read failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * We don't do much here, just schedule handling by raid1d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (!bio->bi_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) set_bit(R1BIO_Uptodate, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (atomic_dec_and_test(&r1_bio->remaining))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) reschedule_retry(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) sector_t sync_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) sector_t s = r1_bio->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) long sectors_to_go = r1_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /* make sure these bitmap bits don't get cleared, so the range is resynced later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) s += sync_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) sectors_to_go -= sync_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) } while (sectors_to_go > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
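/*
 * Drop one reference on a resync r1bio. The final put either hands it
 * to raid1d for bad-block / write-error handling, or frees the buffer
 * and reports the completed sectors via md_done_sync().
 */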
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (atomic_dec_and_test(&r1_bio->remaining)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) struct mddev *mddev = r1_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) int s = r1_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) test_bit(R1BIO_WriteError, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) reschedule_retry(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) put_buf(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) md_done_sync(mddev, s, uptodate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static void end_sync_write(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) int uptodate = !bio->bi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) struct r1bio *r1_bio = get_resync_r1bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) struct mddev *mddev = r1_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (!uptodate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) abort_sync_write(mddev, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) set_bit(WriteErrorSeen, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (!test_and_set_bit(WantReplacement, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) set_bit(R1BIO_WriteError, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) &first_bad, &bad_sectors) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) r1_bio->sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) r1_bio->sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) &first_bad, &bad_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) set_bit(R1BIO_MadeGood, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) put_sync_write_buf(r1_bio, uptodate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
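/*
 * Synchronously read or write 'sectors' starting at 'sector' on rdev.
 * On failure, flag write errors (and the need for a replacement), then
 * record a bad block or, if that is not possible, fail the device.
 */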
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) int sectors, struct page *page, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (rw == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) set_bit(WriteErrorSeen, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (!test_and_set_bit(WantReplacement, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /* need to record an error - either for the block or the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (!rdev_set_badblocks(rdev, sector, sectors, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) md_error(rdev->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) static int fix_sync_read_error(struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /* Try some synchronous reads of other devices to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * good data, much like with normal read errors. Only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * read into the pages we already have so we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * need to re-issue the read request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) * We don't need to freeze the array, because being in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * active sync request, there is no normal IO, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * no overlapping syncs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * We don't need to check is_badblock() again as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) * made sure that anything with a bad block in range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * will have bi_end_io clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) struct mddev *mddev = r1_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct bio *bio = r1_bio->bios[r1_bio->read_disk];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) struct page **pages = get_resync_pages(bio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) sector_t sect = r1_bio->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) int sectors = r1_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) rdev = conf->mirrors[r1_bio->read_disk].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (test_bit(FailFast, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) /* Don't try recovering from here - just fail it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * ... unless it is the last working device of course */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (test_bit(Faulty, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /* Don't try to read from here, but make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * put_buf does its thing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) bio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) while (sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) int s = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) int d = r1_bio->read_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) int success = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (s > (PAGE_SIZE>>9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) s = PAGE_SIZE >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /* No rcu protection needed here; devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * can only be removed when no resync is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * active, and resync is currently active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (sync_page_io(rdev, sect, s<<9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) pages[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) REQ_OP_READ, 0, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) success = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) d++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (d == conf->raid_disks * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) } while (!success && d != r1_bio->read_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (!success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) int abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) /* Cannot read from anywhere, this block is lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * Record a bad block on each device. If that doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * work just disable and interrupt the recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) * Don't fail devices as that won't really help.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) mdname(mddev), bio_devname(bio, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) (unsigned long long)r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) for (d = 0; d < conf->raid_disks * 2; d++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (!rdev || test_bit(Faulty, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (!rdev_set_badblocks(rdev, sect, s, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (abort) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) conf->recovery_disabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) mddev->recovery_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) set_bit(MD_RECOVERY_INTR, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) md_done_sync(mddev, r1_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) put_buf(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /* Try next page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) sectors -= s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) sect += s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) start = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) /* write it back and re-read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) while (d != r1_bio->read_disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (d == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) d = conf->raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) d--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (r1_bio->bios[d]->bi_end_io != end_sync_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (r1_sync_page_io(rdev, sect, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) pages[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) WRITE) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) r1_bio->bios[d]->bi_end_io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
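/* re-read from those devices to verify the data and account the corrected sectors */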
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) d = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) while (d != r1_bio->read_disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (d == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) d = conf->raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) d--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (r1_bio->bios[d]->bi_end_io != end_sync_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (r1_sync_page_io(rdev, sect, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) pages[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) READ) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) atomic_add(s, &rdev->corrected_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) sectors -= s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) sect += s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) set_bit(R1BIO_Uptodate, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) bio->bi_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) static void process_checks(struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) /* We have read all readable devices. If we haven't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) * got the block, then there is no hope left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) * If we have, then we want to do a comparison
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) * and skip the write if everything is the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) * If any blocks failed to read, then we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * attempt an over-write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) struct mddev *mddev = r1_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) int primary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) int vcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /* Fix variable parts of all bios */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) for (i = 0; i < conf->raid_disks * 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) blk_status_t status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) struct bio *b = r1_bio->bios[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) struct resync_pages *rp = get_resync_pages(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (b->bi_end_io != end_sync_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* fixup the bio for reuse, but preserve the error status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) status = b->bi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) bio_reset(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) b->bi_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) b->bi_iter.bi_sector = r1_bio->sector +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) conf->mirrors[i].rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) bio_set_dev(b, conf->mirrors[i].rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) b->bi_end_io = end_sync_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) rp->raid_bio = r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) b->bi_private = rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /* initialize bvec table again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
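/*
 * Pick the first device whose resync read completed without error as
 * the reference copy; the others are compared against it and rewritten
 * if they differ or failed to read.
 */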
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) for (primary = 0; primary < conf->raid_disks * 2; primary++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) !r1_bio->bios[primary]->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) r1_bio->bios[primary]->bi_end_io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) r1_bio->read_disk = primary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) for (i = 0; i < conf->raid_disks * 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) int j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct bio *pbio = r1_bio->bios[primary];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct bio *sbio = r1_bio->bios[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) blk_status_t status = sbio->bi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) struct page **ppages = get_resync_pages(pbio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct page **spages = get_resync_pages(sbio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct bio_vec *bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) int page_len[RESYNC_PAGES] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) struct bvec_iter_all iter_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (sbio->bi_end_io != end_sync_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /* Now we can 'fixup' the error value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) sbio->bi_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) bio_for_each_segment_all(bi, sbio, iter_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) page_len[j++] = bi->bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) for (j = vcnt; j-- ; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (memcmp(page_address(ppages[j]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) page_address(spages[j]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) page_len[j]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (j >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) && !status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) /* No need to write to this device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) sbio->bi_end_io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) rdev_dec_pending(conf->mirrors[i].rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) bio_copy_data(sbio, pbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) int disks = conf->raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) struct bio *wbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /* ouch - failed to read all of that. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (!fix_sync_read_error(r1_bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) process_checks(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * schedule writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) atomic_set(&r1_bio->remaining, 1);
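/*
 * Write the reference data to every device that still has a bio worth
 * writing: skip empty slots, skip the disk we read from, and only
 * rewrite devices read purely for comparison when this is a real resync.
 */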
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) for (i = 0; i < disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) wbio = r1_bio->bios[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (wbio->bi_end_io == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) (wbio->bi_end_io == end_sync_read &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) (i == r1_bio->read_disk ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) abort_sync_write(mddev, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) wbio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) wbio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) atomic_inc(&r1_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) submit_bio_noacct(wbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) put_sync_write_buf(r1_bio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * This is a kernel thread which:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) * 1. Retries failed read operations on working mirrors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * 2. Updates the raid superblock when problems are encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * 3. Performs writes following reads for array synchronising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
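/*
 * Try to correct a read error on 'read_disk': find another mirror that
 * can read each chunk, write that data back to the mirrors that failed,
 * then re-read to confirm and account the corrected sectors. Chunks
 * that cannot be read anywhere are recorded as bad blocks instead.
 */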
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) static void fix_read_error(struct r1conf *conf, int read_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) sector_t sect, int sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) struct mddev *mddev = conf->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) while (sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) int s = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) int d = read_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) int success = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (s > (PAGE_SIZE>>9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) s = PAGE_SIZE >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (rdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) (test_bit(In_sync, &rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) (!test_bit(Faulty, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) rdev->recovery_offset >= sect + s)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) is_badblock(rdev, sect, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) &first_bad, &bad_sectors) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (sync_page_io(rdev, sect, s<<9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) conf->tmppage, REQ_OP_READ, 0, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) success = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) d++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (d == conf->raid_disks * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) } while (!success && d != read_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (!success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /* Cannot read from anywhere - mark it bad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (!rdev_set_badblocks(rdev, sect, s, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) /* write it back and re-read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) start = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) while (d != read_disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (d == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) d = conf->raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) d--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (rdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) !test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) r1_sync_page_io(rdev, sect, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) conf->tmppage, WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) d = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) while (d != read_disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (d == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) d = conf->raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) d--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (rdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) !test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (r1_sync_page_io(rdev, sect, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) conf->tmppage, READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) atomic_add(s, &rdev->corrected_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) mdname(mddev), s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) (unsigned long long)(sect +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) rdev->data_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) sectors -= s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) sect += s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static int narrow_write_error(struct r1bio *r1_bio, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct mddev *mddev = r1_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct md_rdev *rdev = conf->mirrors[i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /* bio has the data to be written to device 'i' where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * we just recently had a write error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * We repeatedly clone the bio and trim down to one block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * then try the write. Where the write fails we record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * a bad block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * It is conceivable that the bio doesn't exactly align with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) * blocks. We must handle this somehow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) * We currently own a reference on the rdev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) int block_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) int sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) int sect_to_write = r1_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) int ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (rdev->badblocks.shift < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) block_sectors = roundup(1 << rdev->badblocks.shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) bdev_logical_block_size(rdev->bdev) >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) sector = r1_bio->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) sectors = ((sector + block_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) & ~(sector_t)(block_sectors - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) - sector;
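/*
 * Worked example with purely illustrative values: badblocks.shift == 3
 * and a 512-byte logical block size give block_sectors == 8.  If
 * r1_bio->sector == 21, then sectors = ((21 + 8) & ~7) - 21 = 3, so the
 * first write covers only the 3 sectors up to the next 8-sector
 * boundary; later iterations use full block_sectors-sized chunks.
 */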
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) while (sect_to_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) struct bio *wbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (sectors > sect_to_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) sectors = sect_to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) /* Write at 'sector' for 'sectors' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) wbio = bio_clone_fast(r1_bio->behind_master_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) GFP_NOIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) wbio->bi_iter.bi_sector = r1_bio->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) wbio->bi_iter.bi_size = r1_bio->sectors << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) bio_trim(wbio, sector - r1_bio->sector, sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) wbio->bi_iter.bi_sector += rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) bio_set_dev(wbio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (submit_bio_wait(wbio) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) /* failure! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ok = rdev_set_badblocks(rdev, sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) sectors, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) && ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) bio_put(wbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) sect_to_write -= sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) sector += sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) sectors = block_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) return ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) int m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) int s = r1_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) for (m = 0; m < conf->raid_disks * 2 ; m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) struct md_rdev *rdev = conf->mirrors[m].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) struct bio *bio = r1_bio->bios[m];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) if (bio->bi_end_io == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (!bio->bi_status &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) test_bit(R1BIO_MadeGood, &r1_bio->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (bio->bi_status &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) test_bit(R1BIO_WriteError, &r1_bio->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) md_error(conf->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) put_buf(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) md_done_sync(conf->mddev, s, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) int m, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) bool fail = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) for (m = 0; m < conf->raid_disks * 2 ; m++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (r1_bio->bios[m] == IO_MADE_GOOD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) struct md_rdev *rdev = conf->mirrors[m].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) rdev_clear_badblocks(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) r1_bio->sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) r1_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) rdev_dec_pending(rdev, conf->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) } else if (r1_bio->bios[m] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* This drive got a write error. We need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) * narrow down and record precise write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) * errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) fail = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (!narrow_write_error(r1_bio, m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) md_error(conf->mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) conf->mirrors[m].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) /* an I/O failed, we can't clear the bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) set_bit(R1BIO_Degraded, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) rdev_dec_pending(conf->mirrors[m].rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) conf->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (fail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) idx = sector_to_idx(r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) atomic_inc(&conf->nr_queued[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * Wake up in case freeze_array() is waiting for the condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) * get_unqueued_pending() == extra to become true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) md_wakeup_thread(conf->mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) if (test_bit(R1BIO_WriteError, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) close_write(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) raid_end_bio_io(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) struct mddev *mddev = conf->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) clear_bit(R1BIO_ReadError, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) /* we got a read error. Maybe the drive is bad.  Maybe just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * the block is bad and we can fix it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * We freeze all other IO, and try reading the block from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * other devices.  When we find one, we re-write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) * and check whether that fixes the read error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) * This is all done synchronously while the array is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) * frozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) bio = r1_bio->bios[r1_bio->read_disk];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) bio_put(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) r1_bio->bios[r1_bio->read_disk] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
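/*
 * If the array is writable we try to fix the failed block in place,
 * except on FailFast devices, where a read error is escalated straight
 * to a device failure.  On a read-only array we just mark this slot
 * IO_BLOCKED so the retried read below avoids it.
 */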
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) rdev = conf->mirrors[r1_bio->read_disk].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (mddev->ro == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) && !test_bit(FailFast, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) freeze_array(conf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) fix_read_error(conf, r1_bio->read_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) r1_bio->sector, r1_bio->sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) unfreeze_array(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) rdev_dec_pending(rdev, conf->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) allow_barrier(conf, r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) bio = r1_bio->master_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) r1_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) static void raid1d(struct md_thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) struct mddev *mddev = thread->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) struct r1bio *r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) struct list_head *head = &conf->retry_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) md_check_recovery(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
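/*
 * First retire any r1bios that saw write errors and were parked on
 * bio_end_io_list until the superblock/badblocks update completed:
 * once MD_SB_CHANGE_PENDING is clear they can be finished off.
 */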
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) if (!list_empty_careful(&conf->bio_end_io_list) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) LIST_HEAD(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) list_splice_init(&conf->bio_end_io_list, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) while (!list_empty(&tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) r1_bio = list_first_entry(&tmp, struct r1bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) list_del(&r1_bio->retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) idx = sector_to_idx(r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) atomic_dec(&conf->nr_queued[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (mddev->degraded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) set_bit(R1BIO_Degraded, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (test_bit(R1BIO_WriteError, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) close_write(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) raid_end_bio_io(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) flush_pending_writes(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) r1_bio = list_entry(head->prev, struct r1bio, retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) list_del(head->prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) idx = sector_to_idx(r1_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) atomic_dec(&conf->nr_queued[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) mddev = r1_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) test_bit(R1BIO_WriteError, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) handle_sync_write_finished(conf, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) sync_request_write(mddev, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) test_bit(R1BIO_WriteError, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) handle_write_finished(conf, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) else if (test_bit(R1BIO_ReadError, &r1_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) handle_read_error(conf, r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) md_check_recovery(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) static int init_resync(struct r1conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) int buffs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) BUG_ON(mempool_initialized(&conf->r1buf_pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) r1buf_pool_free, conf->poolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) struct resync_pages *rps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
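/*
 * bio_reset() clears bi_private, but each resync bio keeps its
 * resync_pages pointer there, so save it and put it back after the
 * reset.
 */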
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) for (i = conf->poolinfo->raid_disks; i--; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) bio = r1bio->bios[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) rps = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) bio_reset(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) bio->bi_private = rps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) r1bio->master_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) return r1bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) * perform a "sync" on one "block"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) * We need to make sure that no normal I/O request - particularly write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) * requests - conflict with active sync requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) * This is achieved by tracking pending requests and a 'barrier' concept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * that can be installed to exclude normal IO requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) */
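/*
 * Note that the barrier bookkeeping is per sector bucket:
 * sector_to_idx() maps a sector to one of BARRIER_BUCKETS_NR buckets,
 * and the nr_pending/nr_waiting/nr_queued/barrier counters are kept
 * per bucket, so a resync barrier on one region need not block normal
 * I/O to unrelated regions.
 */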
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) int *skipped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) struct r1bio *r1_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) sector_t max_sector, nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) int disk = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) int wonly = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) int write_targets = 0, read_targets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) sector_t sync_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) int still_degraded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) int good_sectors = RESYNC_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) int min_bad = 0; /* number of sectors that are bad in all devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) int idx = sector_to_idx(sector_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) int page_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (!mempool_initialized(&conf->r1buf_pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (init_resync(conf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) max_sector = mddev->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (sector_nr >= max_sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /* If we aborted, we need to abort the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) * sync on the 'current' bitmap chunk (there will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) * only be one in raid1 resync).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) * We can find the current address in mddev->curr_resync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) if (mddev->curr_resync < max_sector) /* aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) &sync_blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) else /* completed sync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) conf->fullsync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) md_bitmap_close_sync(mddev->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) close_sync(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (mddev_is_clustered(mddev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) conf->cluster_sync_low = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) conf->cluster_sync_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) if (mddev->bitmap == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) mddev->recovery_cp == MaxSector &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) conf->fullsync == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) *skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) return max_sector - sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /* before building a request, check if we can skip these blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) * This call to md_bitmap_start_sync doesn't actually record anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) /* We can skip this block, and probably several more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) *skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return sync_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) * If there is non-resync activity waiting for a turn, then let it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) * through before starting on this new sync request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (atomic_read(&conf->nr_waiting[idx]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) /* we are incrementing sector_nr below. To be safe, we check against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * sector_nr + two times RESYNC_SECTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) if (raise_barrier(conf, sector_nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) r1_bio = raid1_alloc_init_r1buf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) * If we get a correctable read error during resync or recovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) * we might want to read from a different device. So we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * flag all drives that could conceivably be read from for READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) * and any others (which will be non-In_sync devices) for WRITE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * If a read fails, we try reading from something else for which READ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) * is OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) r1_bio->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) r1_bio->sector = sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) r1_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) set_bit(R1BIO_IsSync, &r1_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) /* make sure good_sectors won't go across a barrier unit boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) for (i = 0; i < conf->raid_disks * 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) bio = r1_bio->bios[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) rdev = rcu_dereference(conf->mirrors[i].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (rdev == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (i < conf->raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) still_degraded = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) } else if (!test_bit(In_sync, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) bio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) write_targets ++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) /* may need to read from here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) sector_t first_bad = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (is_badblock(rdev, sector_nr, good_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) &first_bad, &bad_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (first_bad > sector_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) good_sectors = first_bad - sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) bad_sectors -= (sector_nr - first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (min_bad == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) min_bad > bad_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) min_bad = bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) if (sector_nr < first_bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (test_bit(WriteMostly, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) if (wonly < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) wonly = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) if (disk < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) disk = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) bio_set_op_attrs(bio, REQ_OP_READ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) bio->bi_end_io = end_sync_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) read_targets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) * The device is suitable for reading (InSync),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * but has bad block(s) here. Let's try to correct them,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * if we are doing resync or repair. Otherwise, leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) * this device alone for this sync request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) bio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) write_targets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (rdev && bio->bi_end_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) bio_set_dev(bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) if (test_bit(FailFast, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) bio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) if (disk < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) disk = wonly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) r1_bio->read_disk = disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) if (read_targets == 0 && min_bad > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) /* These sectors are bad on all InSync devices, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) * need to mark them bad on all write targets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) int ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) for (i = 0 ; i < conf->raid_disks * 2 ; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) struct md_rdev *rdev = conf->mirrors[i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) ok = rdev_set_badblocks(rdev, sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) min_bad, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) ) && ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) *skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) put_buf(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (!ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) /* Cannot record the badblocks, so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * abort the resync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * If there are multiple read targets, could just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * fail the really bad ones ???
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) conf->recovery_disabled = mddev->recovery_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) set_bit(MD_RECOVERY_INTR, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) return min_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (min_bad > 0 && min_bad < good_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) /* only resync enough to reach the next bad->good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) * transition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) good_sectors = min_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) /* extra read targets are also write targets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) write_targets += read_targets-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) if (write_targets == 0 || read_targets == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) /* There is nowhere to write, so all non-sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) * drives must have failed - we are finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) sector_t rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) if (min_bad > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) max_sector = sector_nr + min_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) rv = max_sector - sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) *skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) put_buf(r1_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (max_sector > mddev->resync_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) max_sector = mddev->resync_max; /* Don't do IO beyond here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (max_sector > sector_nr + good_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) max_sector = sector_nr + good_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) nr_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) sync_blocks = 0;
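/*
 * Build the request one page at a time: each pass below adds the same
 * page-sized chunk to every bio that has an end_io handler, clipped to
 * max_sector and to the bitmap's sync_blocks window.
 */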
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) int len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (sector_nr + (len>>9) > max_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) len = (max_sector - sector_nr) << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (sync_blocks == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) &sync_blocks, still_degraded) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) !conf->fullsync &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if ((len >> 9) > sync_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) len = sync_blocks<<9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) for (i = 0 ; i < conf->raid_disks * 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) struct resync_pages *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) bio = r1_bio->bios[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) rp = get_resync_pages(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) if (bio->bi_end_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) page = resync_fetch_page(rp, page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) * won't fail because the vec table is big
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) * enough to hold all these pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) bio_add_page(bio, page, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) nr_sectors += len>>9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) sector_nr += len>>9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) sync_blocks -= (len>>9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) } while (++page_idx < RESYNC_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) r1_bio->sectors = nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (mddev_is_clustered(mddev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) conf->cluster_sync_high < sector_nr + nr_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) conf->cluster_sync_low = mddev->curr_resync_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) /* Send resync message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) md_cluster_ops->resync_info_update(mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) conf->cluster_sync_low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) conf->cluster_sync_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) /* For a user-requested sync, we read all readable devices and do a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) * compare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) atomic_set(&r1_bio->remaining, read_targets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) bio = r1_bio->bios[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (bio->bi_end_io == end_sync_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) read_targets--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) md_sync_acct_bio(bio, nr_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (read_targets == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) bio->bi_opf &= ~MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) atomic_set(&r1_bio->remaining, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) bio = r1_bio->bios[r1_bio->read_disk];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) md_sync_acct_bio(bio, nr_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (read_targets == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) bio->bi_opf &= ~MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) return nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) if (sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) return sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) return mddev->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) static struct r1conf *setup_conf(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) struct r1conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) struct raid1_info *disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (!conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) sizeof(atomic_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (!conf->nr_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) sizeof(atomic_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if (!conf->nr_waiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) sizeof(atomic_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) if (!conf->nr_queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) sizeof(atomic_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) if (!conf->barrier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
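/*
 * conf->mirrors holds raid_disks * 2 entries: slots [0, raid_disks)
 * are the active devices and slots [raid_disks, raid_disks * 2) hold
 * any replacement devices (see the Replacement handling below).
 */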
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) mddev->raid_disks, 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if (!conf->mirrors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) conf->tmppage = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (!conf->tmppage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) if (!conf->poolinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) conf->poolinfo->raid_disks = mddev->raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) rbio_pool_free, conf->poolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) conf->poolinfo->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) spin_lock_init(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) rdev_for_each(rdev, mddev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) int disk_idx = rdev->raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) if (disk_idx >= mddev->raid_disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) || disk_idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) if (test_bit(Replacement, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) disk = conf->mirrors + mddev->raid_disks + disk_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) disk = conf->mirrors + disk_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) if (disk->rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) disk->rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) disk->head_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) disk->seq_start = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) conf->raid_disks = mddev->raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) conf->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) INIT_LIST_HEAD(&conf->retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) INIT_LIST_HEAD(&conf->bio_end_io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) spin_lock_init(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) init_waitqueue_head(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) bio_list_init(&conf->pending_bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) conf->pending_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) conf->recovery_disabled = mddev->recovery_disabled - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) for (i = 0; i < conf->raid_disks * 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) disk = conf->mirrors + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) if (i < conf->raid_disks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) disk[conf->raid_disks].rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) /* This slot has a replacement. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) if (!disk->rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) /* No original, just make the replacement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) * a recovering spare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) disk->rdev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) disk[conf->raid_disks].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) disk[conf->raid_disks].rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) } else if (!test_bit(In_sync, &disk->rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) /* Original is not in_sync - bad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (!disk->rdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) !test_bit(In_sync, &disk->rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) disk->head_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) if (disk->rdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) (disk->rdev->saved_raid_disk < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) conf->fullsync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) conf->thread = md_register_thread(raid1d, mddev, "raid1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (!conf->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) return conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) if (conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) mempool_exit(&conf->r1bio_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) kfree(conf->mirrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) safe_put_page(conf->tmppage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) kfree(conf->poolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) kfree(conf->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) kfree(conf->nr_waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) kfree(conf->nr_queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) kfree(conf->barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) bioset_exit(&conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) kfree(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) static void raid1_free(struct mddev *mddev, void *priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) static int raid1_run(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) struct r1conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) bool discard_supported = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (mddev->level != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) mdname(mddev), mddev->level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) if (mddev->reshape_position != MaxSector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) pr_warn("md/raid1:%s: reshape_position set but not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) if (mddev_init_writes_pending(mddev) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) * copy the already verified devices into our private RAID1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) * bookkeeping area. [whatever we allocate in run()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) * should be freed in raid1_free()]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) if (mddev->private == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) conf = setup_conf(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (IS_ERR(conf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) return PTR_ERR(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) if (mddev->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) blk_queue_max_write_same_sectors(mddev->queue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) rdev_for_each(rdev, mddev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) if (!mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) disk_stack_limits(mddev->gendisk, rdev->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) rdev->data_offset << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) discard_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) mddev->degraded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) for (i = 0; i < conf->raid_disks; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) if (conf->mirrors[i].rdev == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) test_bit(Faulty, &conf->mirrors[i].rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) mddev->degraded++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) * RAID1 needs at least one active disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (conf->raid_disks - mddev->degraded < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
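/*
 * With only one working device there is nothing to resync against,
 * so the array can be treated as clean.
 */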
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (conf->raid_disks - mddev->degraded == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) mddev->recovery_cp = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) if (mddev->recovery_cp != MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) mdname(mddev), mddev->raid_disks - mddev->degraded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) mddev->raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) * Ok, everything is just fine now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) mddev->thread = conf->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) conf->thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) mddev->private = conf;
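/*
 * Failfast I/O that fails on one mirror can be retried on another, so it
 * is safe to advertise failfast support for this array.
 */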
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (mddev->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (discard_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) blk_queue_flag_set(QUEUE_FLAG_DISCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) mddev->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) mddev->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) ret = md_integrity_register(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) md_unregister_thread(&mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) raid1_free(mddev, conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
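/*
 * Release the private r1conf.  Called through the personality ->free hook
 * (and from raid1_run's error path) when the array is torn down.
 */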
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) static void raid1_free(struct mddev *mddev, void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) struct r1conf *conf = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) mempool_exit(&conf->r1bio_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) kfree(conf->mirrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) safe_put_page(conf->tmppage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) kfree(conf->poolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) kfree(conf->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) kfree(conf->nr_waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) kfree(conf->nr_queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) kfree(conf->barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) bioset_exit(&conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) kfree(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
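/*
 * Grow or shrink the per-device size of the array.  Reached through the
 * personality ->resize hook; from user space this is typically requested
 * with something like (illustrative only):
 *
 *	mdadm --grow /dev/mdX --size=max
 */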
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) static int raid1_resize(struct mddev *mddev, sector_t sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) /* No resync is happening and there is enough space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) * on all devices, so we can resize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) * We need to make sure resync covers any new space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) * If the array is shrinking we should possibly wait until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) * any I/O in the removed space completes, but it hardly seems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) * worth it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) sector_t newsize = raid1_size(mddev, sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) if (mddev->external_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) mddev->array_sectors > newsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) if (mddev->bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) md_set_array_sectors(mddev, newsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) if (sectors > mddev->dev_sectors &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) mddev->recovery_cp > mddev->dev_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) mddev->recovery_cp = mddev->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) mddev->dev_sectors = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) mddev->resync_max_sectors = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
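/*
 * Change the number of mirrors.  Registered as the ->check_reshape hook;
 * for raid1 the reshape completes immediately since no data has to be
 * relocated.  From user space this is typically requested with something
 * like (illustrative only):
 *
 *	mdadm --grow /dev/mdX --raid-devices=3
 */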
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) static int raid1_reshape(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) /* We need to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) * 1/ resize the r1bio_pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) * 2/ resize conf->mirrors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) * We allocate a new r1bio_pool if we can.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) * Then raise a device barrier and wait until all IO stops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) * Then resize conf->mirrors and swap in the new r1bio pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) * At the same time, we "pack" the devices so that all the missing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) * devices end up with the highest raid_disk numbers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) mempool_t newpool, oldpool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) struct pool_info *newpoolinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) struct raid1_info *newmirrors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) int cnt, raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) int d, d2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) memset(&newpool, 0, sizeof(newpool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) memset(&oldpool, 0, sizeof(oldpool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) /* Cannot change chunk_size, layout, or level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) mddev->layout != mddev->new_layout ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) mddev->level != mddev->new_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) mddev->new_chunk_sectors = mddev->chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) mddev->new_layout = mddev->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) mddev->new_level = mddev->level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (!mddev_is_clustered(mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) md_allow_write(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) raid_disks = mddev->raid_disks + mddev->delta_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) if (raid_disks < conf->raid_disks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) for (d = 0; d < conf->raid_disks; d++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) if (conf->mirrors[d].rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) if (cnt > raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) if (!newpoolinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) newpoolinfo->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) newpoolinfo->raid_disks = raid_disks * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) rbio_pool_free, newpoolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) kfree(newpoolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) raid_disks, 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) if (!newmirrors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) kfree(newpoolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) mempool_exit(&newpool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) freeze_array(conf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) /* ok, everything is stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) oldpool = conf->r1bio_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) conf->r1bio_pool = newpool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) for (d = d2 = 0; d < conf->raid_disks; d++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) struct md_rdev *rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) if (rdev && rdev->raid_disk != d2) {
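/*
 * The device moves to slot d2: drop the sysfs "rd%d" link for the old
 * slot (and any stale link for the new one) before re-linking it under
 * the new name.
 */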
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) sysfs_unlink_rdev(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) rdev->raid_disk = d2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) sysfs_unlink_rdev(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) if (sysfs_link_rdev(mddev, rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) pr_warn("md/raid1:%s: cannot register rd%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) mdname(mddev), rdev->raid_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) newmirrors[d2++].rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) kfree(conf->mirrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) conf->mirrors = newmirrors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) kfree(conf->poolinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) conf->poolinfo = newpoolinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
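/*
 * Adjust the degraded count for the change in slot count: newly added
 * slots start out empty and remain degraded until recovery fills them.
 */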
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) mddev->degraded += (raid_disks - conf->raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) conf->raid_disks = mddev->raid_disks = raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) mddev->delta_disks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) unfreeze_array(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) mempool_exit(&oldpool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
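/*
 * Suspend or resume I/O on the array.  Quiescing freezes the array through
 * the barrier machinery; unquiescing lets waiting I/O proceed again.
 */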
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) static void raid1_quiesce(struct mddev *mddev, int quiesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) struct r1conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) if (quiesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) freeze_array(conf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) unfreeze_array(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)
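/*
 * Convert an existing array to raid1.  Only a two-device raid5 can be
 * taken over; from user space this is typically requested with something
 * like (illustrative only):
 *
 *	mdadm --grow /dev/mdX --level=1
 */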
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) static void *raid1_takeover(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) /* raid1 can take over:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) * raid5 with 2 devices, any layout or chunk size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (mddev->level == 5 && mddev->raid_disks == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) struct r1conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) mddev->new_level = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) mddev->new_layout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) mddev->new_chunk_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) conf = setup_conf(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (!IS_ERR(conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) /* Array must appear to be quiesced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) conf->array_frozen = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) mddev_clear_unsupported_flags(mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) UNSUPPORTED_MDDEV_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) return conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
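/* Hook table that registers the raid1 implementation with the md core. */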
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) static struct md_personality raid1_personality =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) .name = "raid1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) .level = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) .make_request = raid1_make_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) .run = raid1_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) .free = raid1_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) .status = raid1_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) .error_handler = raid1_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) .hot_add_disk = raid1_add_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) .hot_remove_disk= raid1_remove_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) .spare_active = raid1_spare_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) .sync_request = raid1_sync_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) .resize = raid1_resize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) .size = raid1_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) .check_reshape = raid1_reshape,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) .quiesce = raid1_quiesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) .takeover = raid1_takeover,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) static int __init raid_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) return register_md_personality(&raid1_personality);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) static void raid_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) unregister_md_personality(&raid1_personality);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) module_init(raid_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) module_exit(raid_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) MODULE_ALIAS("md-personality-3"); /* RAID1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) MODULE_ALIAS("md-raid1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) MODULE_ALIAS("md-level-1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
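/*
 * With the permissions above, max_queued_requests is also tunable at
 * runtime through sysfs, e.g. (illustrative, assuming the driver is built
 * as the "raid1" module):
 *
 *	cat /sys/module/raid1/parameters/max_queued_requests
 *	echo 2048 > /sys/module/raid1/parameters/max_queued_requests
 */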