// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

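/*
 * For multi-zone arrays whose metadata does not record a layout, this
 * parameter picks one (see the multi-zone check in create_strip_zones()):
 * 1 selects RAID0_ORIG_LAYOUT, 2 selects RAID0_ALT_MULTIZONE_LAYOUT, and
 * 0 leaves the choice unset, so assembly of such arrays fails.
 */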
static int default_layout = 0;
module_param(default_layout, int, 0644);

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;
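		/*
		 * Illustrative numbers: with 64KiB chunks (128 sectors), a
		 * 1000-sector device is truncated to 7 whole chunks = 896
		 * sectors; the 104-sector tail is never striped over.
		 */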

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	if (conf->nr_strip_zones == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -ENOTSUPP;
		goto abort;
	}
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
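	/*
	 * e.g. a 4KiB-logical-block member pushes blksize to 4096 above, so
	 * a 512-sector (256KiB) chunk passes this check, while a 3-sector
	 * (1536-byte) chunk would not.
	 */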
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
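/*
 * A minimal illustration, assuming two zones whose cumulative zone_end
 * values are 100 and 160: for *sectorp == 120 this returns zone 1 and
 * rewrites *sectorp to 120 - 100 = 20, the offset within that zone.
 */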
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. Two flows are kept separate for
 * the sake of performance: a power-of-2 chunk-size flow and a general
 * flow.
 */
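/*
 * Worked example (illustrative numbers only): with chunk_sects == 8 and a
 * 2-device zone starting at dev_start 0, zone-relative sector 27 gives
 * sect_in_chunk = 27 & 7 = 3, chunk number 27 >> 3 = 3, stripe
 * 27 / (2 * 8) = 1, so *sector_offset becomes 1 * 8 + 3 = 11 on device
 * 3 % 2 = 1: array sector 27 lives at sector 11 of the second device.
 */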
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are the offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;
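	/*
	 * Worked example (illustrative numbers only): chunk_sectors == 8,
	 * nb_dev == 2, start == 3 and end == 27 give stripe_size 16,
	 * first/last stripe index 0 and 1, start_disk_index 0 with offset 3,
	 * and end_disk_index 1 with offset (27 - 16) % 8 + 1 * 8 = 11, so
	 * the loop below discards [3, 16) on disk 0 and [0, 11) on disk 1.
	 */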

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		submit_bio_noacct(discard_bio);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));
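	/*
	 * e.g. a bio starting at sector 27 with 8-sector chunks may carry at
	 * most 8 - (27 & 7) = 5 sectors before crossing a chunk boundary;
	 * anything longer is split below.
	 */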

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 * - far_copies must be 1
	 * - near_copies must be 2
	 * - disks number must be even
	 * - all mirrors must be already degraded
	 */
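	/*
	 * In md's raid10 layout word the low byte holds near_copies and the
	 * next byte far_copies, so the only value accepted here, 0x102,
	 * encodes near_copies == 2 with far_copies == 1.
	 */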
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 * - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
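	/*
	 * e.g. an array of 1,000,000 sectors is not a multiple of 128 but
	 * is of 64, so chunksect settles on 64 (32KiB), which still clears
	 * the PAGE_SIZE check below on 4KiB-page systems.
	 */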

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 * raid4 - if all data disks are active.
	 * raid5 - provided it has the Raid4 layout and one disk is faulty
	 * raid10 - assuming we have all necessary active disks
	 * raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");