Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *    use_far_sets_bugfixed (stored in bit 18 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) copies of each chunk, and each is on a
 * different drive.  near_copies and far_copies must be at least one, and
 * their product is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
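/*
 * Editorial sketch (not part of the original driver): decoding the
 * layout word described above for a hypothetical value 0x00010102:
 *
 *	near_copies  = layout & 0xff        = 0x02 -> 2
 *	far_copies   = (layout >> 8) & 0xff = 0x01 -> 1
 *	far_offset   = (layout >> 16) & 1   = 1
 *	use_far_sets = (layout >> 17) & 1   = 0
 *
 * i.e. two adjacent ("near") copies of every chunk and no additional
 * far copies.
 */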

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);

#define raid10_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
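/*
 * Representative invocation (editorial sketch, not from this section of
 * the file): emit a message into the array's blktrace stream, guarded by
 * the queue check the macro performs:
 *
 *	raid10_log(conf->mddev, "wait barrier");
 */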

#include "raid1-10.c"

/*
 * For a resync bio, the r10bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r10bio *get_resync_r10bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->copies]);

	/* allocate an r10bio with room for conf->copies entries in the
	 * devs[] array */
	return kzalloc(size, gfp_flags);
}
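/*
 * Editorial sizing sketch (plain C, no driver internals assumed):
 * indexing a trailing array member inside offsetof() yields the bytes
 * needed for that many entries, so for conf->copies == 2:
 *
 *	offsetof(struct r10bio, devs[2])
 *		== offsetof(struct r10bio, devs)
 *		   + 2 * sizeof(((struct r10bio *)0)->devs[0])
 *
 * i.e. one fixed header plus two per-copy slots.
 */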

#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
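/*
 * Worked numbers (editorial sketch, assuming RESYNC_BLOCK_SIZE is
 * 64 KiB as defined in raid1-10.c):
 *
 *	RESYNC_SECTORS                = 65536 >> 9      = 128 sectors
 *	RESYNC_DEPTH                  = 32 MiB / 64 KiB = 512 requests
 *	CLUSTER_RESYNC_WINDOW         = 32 * 1 MiB      = 32 MiB
 *	CLUSTER_RESYNC_WINDOW_SECTORS = 33554432 >> 9   = 65536 sectors
 */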

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf).
 */
static void *r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct r10bio *r10_bio;
	struct bio *bio;
	int j;
	int nalloc, nalloc_rp;
	struct resync_pages *rps;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/* allocate once for all bios */
	if (!conf->have_replacement)
		nalloc_rp = nalloc;
	else
		nalloc_rp = nalloc * 2;
	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
	if (!rps)
		goto out_free_r10bio;

	/*
	 * Allocate bios.
	 */
	for (j = nalloc; j--; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		struct resync_pages *rp, *rp_repl;

		rp = &rps[j];
		if (rbio)
			rp_repl = &rps[nalloc + j];

		bio = r10_bio->devs[j].bio;

		if (!j || test_bit(MD_RECOVERY_SYNC,
				   &conf->mddev->recovery)) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r10_bio;
		bio->bi_private = rp;
		if (rbio) {
			memcpy(rp_repl, rp, sizeof(*rp));
			rbio->bi_private = rp_repl;
		}
	}

	return r10_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_put(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_put(r10_bio->devs[j].repl_bio);
	}
	kfree(rps);
out_free_r10bio:
	rbio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;
	struct resync_pages *rp = NULL;

	for (j = conf->copies; j--; ) {
		struct bio *bio = r10bio->devs[j].bio;

		if (bio) {
			rp = get_resync_pages(bio);
			resync_free_pages(rp);
			bio_put(bio);
		}

		bio = r10bio->devs[j].repl_bio;
		if (bio)
			bio_put(bio);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = &r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, &conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, &conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	struct r10conf *conf = r10_bio->mddev->private;

	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r10bio *r10_bio = bio->bi_private;
	int slot;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.  Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
			     rdev->raid_disk))
			uptodate = 1;
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			   r10_bio->sectors,
			   !test_bit(R10BIO_Degraded, &r10_bio->state),
			   0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio)
{
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;
	struct bio *to_put = NULL;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);

			dec_rdev = 0;
			if (test_bit(FailFast, &rdev->flags) &&
			    (bio->bi_opf & MD_FAILFAST)) {
				md_error(rdev->mddev, rdev);
			}

			/*
			 * When the device is faulty, it is not necessary to
			 * handle the write error.
			 */
			if (!test_bit(Faulty, &rdev->flags))
				set_bit(R10BIO_WriteError, &r10_bio->state);
			else {
				/* Fail the request */
				set_bit(R10BIO_Degraded, &r10_bio->state);
				r10_bio->devs[slot].bio = NULL;
				to_put = bio;
				dec_rdev = 1;
			}
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		/*
		 * Do not set R10BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have landed
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
	if (to_put)
		bio_put(to_put);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address.
 */
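/*
 * Editorial worked example (derived from __raid10_find_phys() below,
 * not part of the original driver).  Geometry: raid_disks = 4,
 * near_copies = 2, far_copies = 1.  For virtual chunk number 3:
 *
 *	chunk  = 3 * near_copies = 6
 *	dev    = 6 % raid_disks  = 2
 *	stripe = 6 / raid_disks  = 1
 *
 * so both copies land at device chunk 1, on devices 2 and 3.  The
 * first four virtual chunks therefore map as:
 *
 *	dev0 dev1 dev2 dev3
 *	 v0   v0   v1   v1
 *	 v2   v2   v3   v3
 */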

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n, f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;
	int last_far_set_start, last_far_set_size;

	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
	last_far_set_start *= geo->far_set_size;

	last_far_set_size = geo->far_set_size;
	last_far_set_size += (geo->raid_disks % geo->far_set_size);

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		int set;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			set = d / geo->far_set_size;
			d += geo->near_copies;

			if ((geo->raid_disks % geo->far_set_size) &&
			    (d > last_far_set_start)) {
				d -= last_far_set_start;
				d %= last_far_set_size;
				d += last_far_set_start;
			} else {
				d %= geo->far_set_size;
				d += geo->far_set_size * set;
			}
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;
	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
	int far_set_size = geo->far_set_size;
	int last_far_set_start;

	if (geo->raid_disks % geo->far_set_size) {
		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
		last_far_set_start *= geo->far_set_size;

		if (dev >= last_far_set_start) {
			far_set_size = geo->far_set_size;
			far_set_size += (geo->raid_disks % geo->far_set_size);
			far_set_start = last_far_set_start;
		}
	}

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < far_set_start)
			dev += far_set_size;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < (geo->near_copies + far_set_start))
				dev += far_set_size - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}
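/*
 * Editorial round-trip check (same geometry as the sketch above
 * __raid10_find_phys: raid_disks = 4, near_copies = 2, far_copies = 1).
 * Feeding device 3, device chunk 1 back through raid10_find_virt():
 *
 *	vchunk = chunk * raid_disks + dev = 1 * 4 + 3 = 7
 *	vchunk /= near_copies             = 7 / 2     = 3
 *
 * which recovers virtual chunk 3, matching the forward mapping.
 */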

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because the position is tracked per mirror, not per device.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink read balancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
	int do_balance;
	int best_dist_slot, best_pending_slot;
	bool has_nonrot_disk = false;
	unsigned int min_pending;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
	best_dist_slot = -1;
	min_pending = UINT_MAX;
	best_dist_rdev = NULL;
	best_pending_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	clear_bit(R10BIO_FailFast, &r10_bio->state);
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if ((conf->mddev->recovery_cp < MaxSector
	     && (this_sector + sectors >= conf->next_resync)) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		do_balance = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;
		unsigned int pending;
		bool nonrot;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_dist_slot = slot;
					best_dist_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		if (min_pending > pending && nonrot) {
			min_pending = pending;
			best_pending_slot = slot;
			best_pending_rdev = rdev;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		if (best_dist_slot >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			/* At least 2 disks to choose from so failfast is OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			set_bit(R10BIO_FailFast, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		/* This optimisation is debatable, and completely destroys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		 * sequential read speed for 'far copies' arrays.  So only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		 * keep it for 'near' arrays, and review those later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		if (geo->near_copies > 1 && !pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			new_distance = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		/* for far > 1 always use the lowest address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		else if (geo->far_copies > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			new_distance = r10_bio->devs[slot].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			new_distance = abs(r10_bio->devs[slot].addr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 					   conf->mirrors[disk].head_position);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		if (new_distance < best_dist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			best_dist = new_distance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			best_dist_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			best_dist_rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (slot >= conf->copies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		if (has_nonrot_disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			slot = best_pending_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			rdev = best_pending_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			slot = best_dist_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			rdev = best_dist_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (slot >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		r10_bio->read_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	*max_sectors = best_good_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	return rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) }
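/*
 * Editorial sketch, not part of the driver: how a caller is expected to
 * consume read_balance().  The returned rdev already has nr_pending
 * elevated (see the atomic_inc above), so the caller's only obligation
 * is a matching rdev_dec_pending() once the read is done.
 * 'example_issue_read' is a hypothetical name.
 */
#if 0
static void example_issue_read(struct r10conf *conf, struct r10bio *r10_bio)
{
	int max_sectors;
	struct md_rdev *rdev = read_balance(conf, r10_bio, &max_sectors);

	if (!rdev)
		return;	/* no readable copy of this block remains */
	/* ... build and submit a read bio of at most max_sectors ... */
	rdev_dec_pending(rdev, conf->mddev);	/* drop the ref taken above */
}
#endif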
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) static void flush_pending_writes(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	/* Any writes that have been queued but are awaiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	 * bitmap updates get flushed here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (conf->pending_bio_list.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		bio = bio_list_get(&conf->pending_bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		conf->pending_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		 * As this is called in a wait_event() loop (see freeze_array),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		 * current->state might be TASK_UNINTERRUPTIBLE which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		 * cause a warning when we prepare to wait again.  As it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		 * rare that this path is taken, it is perfectly safe to force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		 * us to go around the wait_event() loop again, so the warning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		 * is a false-positive. Silence the warning by resetting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		 * thread state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		/* flush any pending bitmap writes to disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		 * before proceeding w/ I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		md_bitmap_unplug(conf->mddev->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		while (bio) { /* submit pending writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			struct bio *next = bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			struct md_rdev *rdev = (void*)bio->bi_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			bio_set_dev(bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			if (test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 				bio_io_error(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 					    !blk_queue_discard(bio->bi_disk->queue)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				/* Just ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 				bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			bio = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) }
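/*
 * Editorial sketch, not part of the driver: the snapshot-then-drain
 * pattern used above, in isolation.  bio_list_get() detaches the whole
 * chain atomically while the lock is held; the chain is then walked via
 * bi_next with the lock dropped, so submit_bio_noacct() is free to
 * sleep.  'example_drain' is a hypothetical name.
 */
#if 0
static void example_drain(struct r10conf *conf)
{
	struct bio *bio;

	spin_lock_irq(&conf->device_lock);
	bio = bio_list_get(&conf->pending_bio_list);	/* detach chain */
	conf->pending_count = 0;
	spin_unlock_irq(&conf->device_lock);

	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;	/* unlink before submitting */
		submit_bio_noacct(bio);
		bio = next;
	}
}
#endif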
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) /* Barriers....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * Sometimes we need to suspend IO while we do something else,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * either some resync/recovery, or reconfigure the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * To do this we raise a 'barrier'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * The 'barrier' is a counter that can be raised multiple times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * to count how many activities are happening which preclude
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * normal IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * We can only raise the barrier if there is no pending IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * i.e. if nr_pending == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  * We choose only to raise the barrier if no-one is waiting for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  * barrier to go down.  This means that as soon as an IO request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  * is ready, no other operations which require a barrier will start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915)  * until the IO request has had a chance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917)  * So: regular IO calls 'wait_barrier'.  When that returns there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  *    is no background IO happening.  It must arrange to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  *    allow_barrier when it has finished its IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  * background IO calls must call raise_barrier.  Once that returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  *    there is no normal IO happening.  It must arrange to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  *    lower_barrier when the particular background IO completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  */
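/*
 * Editorial sketch, not part of the driver: the pairing the comment
 * above describes, shown in isolation (the four helpers are defined
 * just below).  'example_regular_io' and 'example_resync_pass' are
 * hypothetical names.
 */
#if 0
static void example_regular_io(struct r10conf *conf)
{
	wait_barrier(conf);	/* sleeps while conf->barrier != 0 */
	/* ... submit normal IO; nr_pending stays elevated meanwhile ... */
	allow_barrier(conf);	/* may let a waiting resync proceed */
}

static void example_resync_pass(struct r10conf *conf)
{
	raise_barrier(conf, 0);	/* waits for pending normal IO to drain */
	/* ... perform one window of resync/recovery IO ... */
	lower_barrier(conf);
}
#endif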
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static void raise_barrier(struct r10conf *conf, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	BUG_ON(force && !conf->barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	/* Wait until no block IO is waiting (unless 'force') */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			    conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	/* block any new IO from starting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	conf->barrier++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/* Now wait for all pending IO to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	wait_event_lock_irq(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			    conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) static void lower_barrier(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	spin_lock_irqsave(&conf->resync_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	conf->barrier--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	spin_unlock_irqrestore(&conf->resync_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) static void wait_barrier(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (conf->barrier) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		struct bio_list *bio_list = current->bio_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		conf->nr_waiting++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		/* Wait for the barrier to drop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		 * However, if there are already pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		 * requests (preventing the barrier from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		 * rising completely), and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		 * pre-process bio queue isn't empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		 * then don't wait, as we need to empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		 * that queue to get the nr_pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		 * count down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		raid10_log(conf->mddev, "wait barrier");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		wait_event_lock_irq(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 				    !conf->barrier ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 				    (atomic_read(&conf->nr_pending) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 				     bio_list &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 				     (!bio_list_empty(&bio_list[0]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 				      !bio_list_empty(&bio_list[1]))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 				     /* move on if recovery thread is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 				      * blocked by us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 				      */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 				     (conf->mddev->thread->tsk == current &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				      test_bit(MD_RECOVERY_RUNNING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 					       &conf->mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				      conf->nr_queued > 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 				    conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		conf->nr_waiting--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		if (!conf->nr_waiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	atomic_inc(&conf->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) static void allow_barrier(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if ((atomic_dec_and_test(&conf->nr_pending)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			(conf->array_freeze_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) static void freeze_array(struct r10conf *conf, int extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	/* Stop sync IO and normal IO and wait for everything to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * go quiet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 * We increment barrier and nr_waiting, and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * wait until nr_pending matches nr_queued+extra.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * This is called in the context of one normal IO request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * that has failed. Thus any sync request that might be pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 * will be blocked by nr_pending, and we need to wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 * pending IO requests to complete or be queued for re-try.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 * Thus the number queued (nr_queued) plus this request (extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 * must match the number of pending IOs (nr_pending) before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	 * we continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	conf->array_freeze_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	conf->barrier++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	conf->nr_waiting++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	wait_event_lock_irq_cmd(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				conf->resync_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				flush_pending_writes(conf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	conf->array_freeze_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
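/*
 * Editorial sketch, not part of the driver: freeze_array() is typically
 * used from error handling, bracketing work that needs a fully quiesced
 * array.  'extra' counts the request(s) the caller itself still holds;
 * a read error handler retrying one failed request would pass 1.
 * 'example_handle_error' is a hypothetical name.
 */
#if 0
static void example_handle_error(struct r10conf *conf)
{
	freeze_array(conf, 1);	/* 1 == the failed request we still hold */
	/* ... array is quiet: safe to fail the rdev, redirect IO, etc ... */
	unfreeze_array(conf);
}
#endif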
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static void unfreeze_array(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	/* reverse the effect of the freeze */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	spin_lock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	conf->barrier--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	conf->nr_waiting--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	spin_unlock_irq(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static sector_t choose_data_offset(struct r10bio *r10_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 				   struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	    test_bit(R10BIO_Previous, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		return rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		return rdev->new_data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
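/*
 * Editorial worked example (values hypothetical): suppose a reshape is
 * running and an rdev has data_offset == 2048 and new_data_offset ==
 * 4096.  A bio for an r10_bio flagged R10BIO_Previous (old geometry)
 * lands at dev_addr + 2048; any other bio lands at dev_addr + 4096.
 * Once MD_RECOVERY_RESHAPE clears, data_offset itself is updated to
 * the new value and everything uses it again.
 */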
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct raid10_plug_cb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct blk_plug_cb	cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	struct bio_list		pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	int			pending_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 						   cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	struct mddev *mddev = plug->cb.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (from_schedule || current->bio_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		conf->pending_count += plug->pending_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		kfree(plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	/* we aren't scheduling, so we can do the write-out directly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	bio = bio_list_get(&plug->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	md_bitmap_unplug(mddev->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	while (bio) { /* submit pending writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		struct bio *next = bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		struct md_rdev *rdev = (void*)bio->bi_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		bio_set_dev(bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		if (test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			bio_io_error(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				    !blk_queue_discard(bio->bi_disk->queue)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			/* Just ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		bio = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	kfree(plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
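/*
 * Editorial sketch, not part of the driver: how the callback above gets
 * registered.  While a blk_plug is active, blk_check_plugged() finds or
 * allocates a per-task raid10_plug_cb, and the block layer invokes
 * raid10_unplug() when the plug is flushed.  The write path further
 * down does exactly this; 'example_queue_write' is a hypothetical name.
 */
#if 0
static void example_queue_write(struct mddev *mddev, struct bio *mbio)
{
	struct blk_plug_cb *cb = blk_check_plugged(raid10_unplug, mddev,
						   sizeof(struct raid10_plug_cb));

	if (cb) {
		struct raid10_plug_cb *plug =
			container_of(cb, struct raid10_plug_cb, cb);

		bio_list_add(&plug->pending, mbio);
		plug->pending_cnt++;
	}
	/* else: no plug; queue on conf->pending_bio_list as the driver does */
}
#endif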
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  * 1. Register the new request and wait if the reconstruction thread has put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  * up a bar for new requests. Continue immediately if no resync is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  * currently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * 2. If the IO spans the reshape position, wait for the reshape to pass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				 struct bio *bio, sector_t sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	wait_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	    bio->bi_iter.bi_sector < conf->reshape_progress &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		raid10_log(conf->mddev, "wait reshape");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		wait_event(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			   conf->reshape_progress >= bio->bi_iter.bi_sector +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			   sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		wait_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
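/*
 * Editorial note: the allow_barrier()/wait_event()/wait_barrier() dance
 * above is the standard way to sleep on a condition without holding the
 * array's IO barrier.  Keeping nr_pending elevated across the sleep
 * could deadlock the reshape thread whose progress is being waited for,
 * since that thread raises the barrier and waits for nr_pending to
 * drain.
 */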
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void raid10_read_request(struct mddev *mddev, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 				struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	struct bio *read_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	const int op = bio_op(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	int max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	int slot = r10_bio->read_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	struct md_rdev *err_rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	gfp_t gfp = GFP_NOIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (slot >= 0 && r10_bio->devs[slot].rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		 * This is an error retry, but we cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		 * safely dereference the rdev in the r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		 * we must use the one in conf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		 * If it has already been disconnected (unlikely)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		 * we lose the device name in error messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		int disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		 * As we are blocking raid10, it is a little safer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		 * use __GFP_HIGH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		gfp = GFP_NOIO | __GFP_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		disk = r10_bio->devs[slot].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (err_rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			bdevname(err_rdev->bdev, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			strcpy(b, "???");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 			/* This never gets dereferenced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			err_rdev = r10_bio->devs[slot].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	regular_request_wait(mddev, conf, bio, r10_bio->sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	rdev = read_balance(conf, r10_bio, &max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (!rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		if (err_rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 					    mdname(mddev), b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 					    (unsigned long long)r10_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		raid_end_bio_io(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (err_rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				   mdname(mddev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 				   bdevname(rdev->bdev, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 				   (unsigned long long)r10_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	if (max_sectors < bio_sectors(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		struct bio *split = bio_split(bio, max_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 					      gfp, &conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		bio_chain(split, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		wait_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		bio = split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		r10_bio->master_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		r10_bio->sectors = max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	slot = r10_bio->read_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	r10_bio->devs[slot].bio = read_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	r10_bio->devs[slot].rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		choose_data_offset(r10_bio, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	bio_set_dev(read_bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	read_bio->bi_end_io = raid10_end_read_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	bio_set_op_attrs(read_bio, op, do_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (test_bit(FailFast, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	    test_bit(R10BIO_FailFast, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	        read_bio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	read_bio->bi_private = r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	        trace_block_bio_remap(read_bio->bi_disk->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	                              read_bio, disk_devt(mddev->gendisk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	                              r10_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	submit_bio_noacct(read_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
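/*
 * Editorial sketch, not part of the driver: the split-and-chain pattern
 * used above when a request cannot be served in one piece.  bio_split()
 * carves the first max_sectors into a new bio and advances the original
 * to the remainder; bio_chain() keeps the original's completion pending
 * until the split part also finishes; the remainder is resubmitted and
 * re-enters raid10_make_request().  'example_split' is a hypothetical
 * name.
 */
#if 0
static struct bio *example_split(struct r10conf *conf, struct bio *bio,
				 int max_sectors)
{
	struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
				      &conf->bio_split);

	bio_chain(split, bio);		/* 'bio' completes after both parts */
	submit_bio_noacct(bio);		/* requeue the tail for later */
	return split;			/* caller handles the head now */
}
#endif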
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 				  struct bio *bio, bool replacement,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 				  int n_copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	const int op = bio_op(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct blk_plug_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct raid10_plug_cb *plug = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	int devnum = r10_bio->devs[n_copy].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	struct bio *mbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (replacement) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		rdev = conf->mirrors[devnum].replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		if (rdev == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 			/* Replacement just got moved to main 'rdev' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			rdev = conf->mirrors[devnum].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		rdev = conf->mirrors[devnum].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (replacement)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		r10_bio->devs[n_copy].repl_bio = mbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		r10_bio->devs[n_copy].bio = mbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 				   choose_data_offset(r10_bio, rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	bio_set_dev(mbio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	mbio->bi_end_io	= raid10_end_write_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	bio_set_op_attrs(mbio, op, do_sync | do_fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (!replacement && test_bit(FailFast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				     &conf->mirrors[devnum].rdev->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			 && enough(conf, devnum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		mbio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	mbio->bi_private = r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (conf->mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		trace_block_bio_remap(mbio->bi_disk->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				      mbio, disk_devt(conf->mddev->gendisk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 				      r10_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	/* flush_pending_writes() needs access to the rdev, so... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	mbio->bi_disk = (void *)rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		plug = container_of(cb, struct raid10_plug_cb, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		plug = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (plug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		bio_list_add(&plug->pending, mbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		plug->pending_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		bio_list_add(&conf->pending_bio_list, mbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		conf->pending_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static void raid10_write_request(struct mddev *mddev, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 				 struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	struct md_rdev *blocked_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	sector_t sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	int max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if ((mddev_is_clustered(mddev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	     md_cluster_ops->area_resyncing(mddev, WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 					    bio->bi_iter.bi_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 					    bio_end_sector(bio)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		DEFINE_WAIT(w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			prepare_to_wait(&conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 					&w, TASK_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		finish_wait(&conf->wait_barrier, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	sectors = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	regular_request_wait(mddev, conf, bio, sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	    (mddev->reshape_backwards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		bio->bi_iter.bi_sector < conf->reshape_progress))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		/* Need to update reshape_position in metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		mddev->reshape_position = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		set_mask_bits(&mddev->sb_flags, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		raid10_log(conf->mddev, "wait reshape metadata");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		wait_event(mddev->sb_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		conf->reshape_safe = mddev->reshape_position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (conf->pending_count >= max_queued_requests) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		raid10_log(mddev, "wait queued");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		wait_event(conf->wait_barrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			   conf->pending_count < max_queued_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	/* First select target devices under rcu_lock and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	 * inc refcount on their rdev.  Record them by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	 * bios[x] to bio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	 * If there are known/acknowledged bad blocks on any device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	 * on which we have seen a write error, we want to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	 * writing to those blocks.  This potentially requires several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	 * writes to write around the bad blocks.  Each set of writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	 * gets its own r10_bio with a set of bios attached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	raid10_find_phys(conf, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) retry_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	blocked_rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	max_sectors = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	for (i = 0;  i < conf->copies; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		int d = r10_bio->devs[i].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		struct md_rdev *rrdev = rcu_dereference(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			conf->mirrors[d].replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		if (rdev == rrdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			rrdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			blocked_rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			atomic_inc(&rrdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			blocked_rdev = rrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		if (rdev && (test_bit(Faulty, &rdev->flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			rrdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		r10_bio->devs[i].bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		r10_bio->devs[i].repl_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		if (!rdev && !rrdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			set_bit(R10BIO_Degraded, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			sector_t dev_sector = r10_bio->devs[i].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			int is_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			is_bad = is_badblock(rdev, dev_sector, max_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 					     &first_bad, &bad_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			if (is_bad < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 				/* Mustn't write here until the bad block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 				 * is acknowledged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 				atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 				set_bit(BlockedBadBlocks, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 				blocked_rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			if (is_bad && first_bad <= dev_sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 				/* Cannot write here at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 				bad_sectors -= (dev_sector - first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				if (bad_sectors < max_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 					/* Mustn't write more than bad_sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 					 * to other devices yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 					max_sectors = bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				/* We don't set R10BIO_Degraded as that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				 * only applies if the disk is missing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 				 * so it might be re-added, and we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 				 * know to recover this chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 				 * In this case the device is here, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 				 * fact that this chunk is not in-sync is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 				 * recorded in the bad block log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			if (is_bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 				int good_sectors = first_bad - dev_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 				if (good_sectors < max_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 					max_sectors = good_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		if (rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			r10_bio->devs[i].bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		if (rrdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			r10_bio->devs[i].repl_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			atomic_inc(&rrdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	if (unlikely(blocked_rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		/* Have to wait for this device to get unblocked, then retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		int d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			if (r10_bio->devs[j].bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 				d = r10_bio->devs[j].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 			if (r10_bio->devs[j].repl_bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 				d = r10_bio->devs[j].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 				rdev = conf->mirrors[d].replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 				if (!rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 					/* Race with remove_disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 					smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 					rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 				rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		wait_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		goto retry_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (max_sectors < r10_bio->sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		r10_bio->sectors = max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	if (r10_bio->sectors < bio_sectors(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		struct bio *split = bio_split(bio, r10_bio->sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 					      GFP_NOIO, &conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		bio_chain(split, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		wait_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		bio = split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		r10_bio->master_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	atomic_set(&r10_bio->remaining, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	for (i = 0; i < conf->copies; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		if (r10_bio->devs[i].bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		if (r10_bio->devs[i].repl_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	one_write_done(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
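/*
 * Editorial worked example (numbers hypothetical) for the bad-block
 * clipping in raid10_write_request() above: with dev_sector = 100,
 * max_sectors = 64 and a bad range starting at first_bad = 120,
 * first_bad > dev_sector, so good_sectors = 120 - 100 = 20 and the
 * whole write is clipped to 20 sectors; the tail becomes a follow-up
 * r10_bio via the bio_split() path.  Had first_bad been <= 100, this
 * rdev would be skipped entirely for the chunk, with the stale range
 * left to the bad block log rather than R10BIO_Degraded.
 */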
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	struct r10bio *r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	r10_bio->master_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	r10_bio->sectors = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	r10_bio->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	r10_bio->sector = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	r10_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	r10_bio->read_slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (bio_data_dir(bio) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		raid10_read_request(mddev, bio, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		raid10_write_request(mddev, bio, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	int chunk_sects = chunk_mask + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	int sectors = bio_sectors(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	    && md_flush_request(mddev, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	if (!md_write_start(mddev, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	 * If this request crosses a chunk boundary, we need to split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	 * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		     sectors > chunk_sects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		     && (conf->geo.near_copies < conf->geo.raid_disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			 || conf->prev.near_copies <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			 conf->prev.raid_disks)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		sectors = chunk_sects -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			(bio->bi_iter.bi_sector &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			 (chunk_sects - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	__make_request(mddev, bio, sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	/* In case raid10d snuck in to freeze_array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
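
/*
 * Worked example of the split arithmetic above, with illustrative
 * numbers: for 64KiB chunks, chunk_sects == 128 and chunk_mask == 127.
 * A 60-sector bio starting at sector 1000 has (1000 & 127) == 104 and
 * 104 + 60 == 164 > 128, so it crosses a chunk boundary and 'sectors'
 * is trimmed to 128 - 104 == 24; the tail beyond the boundary is
 * chained and resubmitted by the bio_split() paths inside
 * __make_request()'s read/write helpers.
 */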
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static void raid10_status(struct seq_file *seq, struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (conf->geo.near_copies < conf->geo.raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (conf->geo.near_copies > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	if (conf->geo.far_copies > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		if (conf->geo.far_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		if (conf->geo.far_set_size != conf->geo.raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 					conf->geo.raid_disks - mddev->degraded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	for (i = 0; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	seq_printf(seq, "]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
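
/*
 * Example of the fragment this emits in /proc/mdstat (hypothetical
 * array: 4 disks, 512KiB chunks, layout near=2, all members in sync):
 *
 *    512K chunks 2 near-copies [4/4] [UUUU]
 */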
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* check if there are enough drives for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  * every block to appear on at least one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)  * Don't consider the device numbered 'ignore'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)  * as we might be about to remove it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static int _enough(struct r10conf *conf, int previous, int ignore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	int first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	int has_enough = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	int disks, ncopies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (previous) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		disks = conf->prev.raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		ncopies = conf->prev.near_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		disks = conf->geo.raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		ncopies = conf->geo.near_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		int n = conf->copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		int this = first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		while (n--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			if (this != ignore &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 			    test_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 				cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			this = (this+1) % disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		if (cnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		first = (first + ncopies) % disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	} while (first != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	has_enough = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	return has_enough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
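
/*
 * Example of the walk above (hypothetical geometry: 4 disks,
 * near_copies == 2, far_copies == 1, so conf->copies == 2): copies
 * live on the pairs {0,1} and {2,3}.  The outer loop checks each pair
 * for at least one in-sync member, advancing 'first' by ncopies.
 * Losing disks 0 and 2 leaves one working copy per pair, so the
 * function returns 1; losing 0 and 1 together empties the first pair
 * and it returns 0.
 */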
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static int enough(struct r10conf *conf, int ignore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	/* when calling 'enough', both 'prev' and 'geo' must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	 * be stable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	 * This is ensured if ->reconfig_mutex or ->device_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	 * is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	return _enough(conf, 0, ignore) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		_enough(conf, 1, ignore);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	 * If it is not operational, then we have already marked it as dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	 * else if it is the last working disk and "fail_last_dev == false",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	 * ignore the error and let the next level up know;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 * else mark the drive as failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	    && !enough(conf, rdev->raid_disk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		 * Don't fail the drive, just return an IO error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	if (test_and_clear_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		mddev->degraded++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	 * If recovery is running, make sure it aborts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	set_bit(Blocked, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	set_bit(Faulty, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	set_mask_bits(&mddev->sb_flags, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		"md/raid10:%s: Operation continuing on %d devices.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		mdname(mddev), bdevname(rdev->bdev, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
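
/*
 * E.g. on a 2-disk near=2 array that is already degraded, a write
 * error on the surviving disk takes the early-return branch above
 * (enough() fails with that disk ignored), so the last disk stays
 * In_sync and the error is reported upward rather than killing the
 * array -- unless fail_last_dev requests otherwise.
 */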
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) static void print_conf(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	pr_debug("RAID10 conf printout:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (!conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		pr_debug("(!conf)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		 conf->geo.raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	/* This is only called with ->reconfig_mutex held, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	 * rcu protection of rdev is not needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	for (i = 0; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		rdev = conf->mirrors[i].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 				 i, !test_bit(In_sync, &rdev->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 				 !test_bit(Faulty, &rdev->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 				 bdevname(rdev->bdev,b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) static void close_sync(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	wait_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	mempool_exit(&conf->r10buf_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static int raid10_spare_active(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	struct raid10_info *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	 * Find all non-in_sync disks within the RAID10 configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	 * and mark them in_sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	for (i = 0; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		tmp = conf->mirrors + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		if (tmp->replacement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		    && tmp->replacement->recovery_offset == MaxSector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		    && !test_bit(Faulty, &tmp->replacement->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			/* Replacement has just become active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			if (!tmp->rdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 				count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			if (tmp->rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 				/* Replaced device not technically faulty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 				 * but we need to be sure it gets removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 				 * and never re-added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 				set_bit(Faulty, &tmp->rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 				sysfs_notify_dirent_safe(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 					tmp->rdev->sysfs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		} else if (tmp->rdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			   && tmp->rdev->recovery_offset == MaxSector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			   && !test_bit(Faulty, &tmp->rdev->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	mddev->degraded -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
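
/*
 * Note on the accounting above: in the replacement branch, 'count' is
 * only bumped when there was no in-sync original being replaced --
 * swapping an in-sync rdev for its finished replacement leaves the
 * number of working devices, and therefore 'degraded', unchanged.
 */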
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	int err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	int mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	int first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	int last = conf->geo.raid_disks - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	if (mddev->recovery_cp < MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		/* only hot-add to in-sync arrays, as recovery is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		 * very different from resync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	if (md_integrity_add_rdev(rdev, mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	if (rdev->raid_disk >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		first = last = rdev->raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (rdev->saved_raid_disk >= first &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	    rdev->saved_raid_disk < conf->geo.raid_disks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		mirror = rdev->saved_raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		mirror = first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	for ( ; mirror <= last ; mirror++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		struct raid10_info *p = &conf->mirrors[mirror];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		if (p->recovery_disabled == mddev->recovery_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		if (p->rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			if (!test_bit(WantReplacement, &p->rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			    p->replacement != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 			clear_bit(In_sync, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 			set_bit(Replacement, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			rdev->raid_disk = mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			if (mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 				disk_stack_limits(mddev->gendisk, rdev->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 						  rdev->data_offset << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			conf->fullsync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			rcu_assign_pointer(p->replacement, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		if (mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			disk_stack_limits(mddev->gendisk, rdev->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 					  rdev->data_offset << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		p->head_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		p->recovery_disabled = mddev->recovery_disabled - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		rdev->raid_disk = mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		if (rdev->saved_raid_disk != mirror)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			conf->fullsync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		rcu_assign_pointer(p->rdev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
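
/*
 * E.g. re-adding a device whose saved_raid_disk is 2 lands back in
 * mirror slot 2 when that slot is free, and since saved_raid_disk then
 * matches 'mirror', fullsync is not forced -- which may allow a
 * bitmap-based partial recovery instead of a full rebuild.  Any other
 * placement sets conf->fullsync.
 */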
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	int number = rdev->raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	struct md_rdev **rdevp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	struct raid10_info *p = conf->mirrors + number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	if (rdev == p->rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		rdevp = &p->rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	else if (rdev == p->replacement)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		rdevp = &p->replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	if (test_bit(In_sync, &rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	    atomic_read(&rdev->nr_pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	/* Only remove non-faulty devices if recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	 * is not possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	if (!test_bit(Faulty, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	    mddev->recovery_disabled != p->recovery_disabled &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	    (!p->replacement || p->replacement == rdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	    number < conf->geo.raid_disks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	    enough(conf, -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	*rdevp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	if (!test_bit(RemoveSynchronized, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		if (atomic_read(&rdev->nr_pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 			/* lost the race, try later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 			*rdevp = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	if (p->replacement) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		/* We must have just cleared 'rdev' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		p->rdev = p->replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		clear_bit(Replacement, &p->replacement->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		smp_mb(); /* Make sure other CPUs may see both as identical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			   * but never as both NULL -- if they are careful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			   */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		p->replacement = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	clear_bit(WantReplacement, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	err = md_integrity_register(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	print_conf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
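
/*
 * Sketch of the intended lock-free reader side of the promotion above:
 * p->rdev is updated before p->replacement is cleared, and smp_mb()
 * orders the two stores, so a reader whose loads are ordered (e.g. via
 * rcu_dereference()) and who finds p->replacement == NULL and then
 * loads p->rdev is guaranteed to see the promoted device.  During the
 * window both fields alias the same rdev, which is the "both as
 * identical" case the comment allows.
 */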
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	struct r10conf *conf = r10_bio->mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if (!bio->bi_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		set_bit(R10BIO_Uptodate, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		/* The write handler will notice the lack of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		 * R10BIO_Uptodate and record any errors etc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		atomic_add(r10_bio->sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 			   &conf->mirrors[d].rdev->corrected_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	/* for reconstruct, we always reschedule after a read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	 * for resync, only after all reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	    atomic_dec_and_test(&r10_bio->remaining)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		/* we have read all the blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		 * do the comparison in process context in raid10d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		reschedule_retry(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static void end_sync_read(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	struct r10bio *r10_bio = get_resync_r10bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	struct r10conf *conf = r10_bio->mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	__end_sync_read(r10_bio, bio, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) static void end_reshape_read(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	/* reshape read bio isn't allocated from r10buf_pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct r10bio *r10_bio = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static void end_sync_request(struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	struct mddev *mddev = r10_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	while (atomic_dec_and_test(&r10_bio->remaining)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		if (r10_bio->master_bio == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			/* the primary of several recovery bios */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			sector_t s = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			    test_bit(R10BIO_WriteError, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 				reschedule_retry(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 				put_buf(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			md_done_sync(mddev, s, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			    test_bit(R10BIO_WriteError, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 				reschedule_retry(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 				put_buf(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 			r10_bio = r10_bio2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) static void end_sync_write(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	struct r10bio *r10_bio = get_resync_r10bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	struct mddev *mddev = r10_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	int d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	int repl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	struct md_rdev *rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	if (repl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		rdev = conf->mirrors[d].replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (bio->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		if (repl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 			md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 			set_bit(WriteErrorSeen, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 				set_bit(MD_RECOVERY_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 					&rdev->mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 			set_bit(R10BIO_WriteError, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	} else if (is_badblock(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			     r10_bio->devs[slot].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			     r10_bio->sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			     &first_bad, &bad_sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		set_bit(R10BIO_MadeGood, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	end_sync_request(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)  * Note: sync and recovery are handled very differently for raid10.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)  * This code is for resync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  * For resync, we read through virtual addresses and read all blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  * If there is any error, we schedule a write.  The lowest numbered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)  * drive is authoritative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)  * However, requests come for physical addresses, so we need to map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * For every physical address there are raid_disks/copies virtual addresses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)  * which is always at least one, but is not necessarily an integer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)  * This means that a physical address can span multiple chunks, so we may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)  * have to submit multiple io requests for a single sync request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)  * We check if all blocks are in-sync and only write to blocks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)  * aren't in sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	int i, first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	struct bio *tbio, *fbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	int vcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	struct page **tpages, **fpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	atomic_set(&r10_bio->remaining, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	/* find the first device with a block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	for (i=0; i<conf->copies; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		if (!r10_bio->devs[i].bio->bi_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (i == conf->copies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	first = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	fbio = r10_bio->devs[i].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	fbio->bi_iter.bi_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	fpages = get_resync_pages(fbio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	/* now find blocks with errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	for (i=0 ; i < conf->copies ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		int  j, d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		struct resync_pages *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		tbio = r10_bio->devs[i].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		if (tbio->bi_end_io != end_sync_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		if (i == first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		tpages = get_resync_pages(tbio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		d = r10_bio->devs[i].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		if (!r10_bio->devs[i].bio->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 			/* We know that the bi_io_vec layout is the same for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 			 * both 'first' and 'i', so we just compare them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 			 * All vec entries are PAGE_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			int sectors = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			for (j = 0; j < vcnt; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 				int len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 				if (sectors < (len / 512))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 					len = sectors * 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 				if (memcmp(page_address(fpages[j]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 					   page_address(tpages[j]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 					   len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 				sectors -= len/512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 			if (j == vcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 				/* Don't fix anything. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		} else if (test_bit(FailFast, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 			/* Just give up on this device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 			md_error(rdev->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		/* Ok, we need to write this bio, either to correct an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		 * inconsistency or to correct an unreadable block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		 * First we need to fix up bv_offset, bv_len and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		 * bi_vecs, as the read request might have corrupted these.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		rp = get_resync_pages(tbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		bio_reset(tbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		rp->raid_bio = r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		tbio->bi_private = rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		tbio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		bio_copy_data(tbio, fbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 			tbio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		submit_bio_noacct(tbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	/* Now write out to any replacement devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	 * that are active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	for (i = 0; i < conf->copies; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		int d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		tbio = r10_bio->devs[i].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		if (!tbio || !tbio->bi_end_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		    && r10_bio->devs[i].bio != fbio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 			bio_copy_data(tbio, fbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		d = r10_bio->devs[i].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		md_sync_acct(conf->mirrors[d].replacement->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			     bio_sectors(tbio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		submit_bio_noacct(tbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	if (atomic_dec_and_test(&r10_bio->remaining)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		md_done_sync(mddev, r10_bio->sectors, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		put_buf(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
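
/*
 * Illustrative numbers for the comparison loop above: a 1024-sector
 * (512KiB) resync window with 4KiB pages gives vcnt == 128; each of
 * the 128 page pairs is memcmp'd for up to PAGE_SIZE bytes (less for
 * the tail when r10_bio->sectors is not page-aligned), and the first
 * mismatching copy has its whole bio rewritten from fbio.
 */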
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)  * Now for the recovery code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)  * Recovery happens across physical sectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)  * We recover all non-in_sync drives by finding the virtual address of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)  * each, and then choose a working drive that also has that virt address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)  * There is a separate r10_bio for each non-in_sync drive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)  * Only the first two slots are in use: the first for reading,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)  * the second for writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static void fix_recovery_read_error(struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	/* We got a read error during recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	 * We repeat the read in smaller page-sized sections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	 * If a read succeeds, write it to the new device or record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	 * a bad block if we cannot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	 * If a read fails, record a bad block on both old and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	 * new devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	struct mddev *mddev = r10_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	struct bio *bio = r10_bio->devs[0].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	sector_t sect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	int sectors = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	int dr = r10_bio->devs[0].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	int dw = r10_bio->devs[1].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	struct page **pages = get_resync_pages(bio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	while (sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		int s = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		sector_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		int ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		if (s > (PAGE_SIZE>>9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 			s = PAGE_SIZE >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		rdev = conf->mirrors[dr].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		addr = r10_bio->devs[0].addr + sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		ok = sync_page_io(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 				  addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 				  s << 9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 				  pages[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 				  REQ_OP_READ, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		if (ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 			rdev = conf->mirrors[dw].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 			addr = r10_bio->devs[1].addr + sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 			ok = sync_page_io(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 					  addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 					  s << 9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 					  pages[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 					  REQ_OP_WRITE, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			if (!ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 				set_bit(WriteErrorSeen, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 				if (!test_and_set_bit(WantReplacement,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 						      &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 					set_bit(MD_RECOVERY_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 						&rdev->mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		if (!ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 			/* We don't worry if we cannot set a bad block -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 			 * it really is bad so there is no loss in not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			 * recording it yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 			rdev_set_badblocks(rdev, addr, s, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			if (rdev != conf->mirrors[dw].rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				/* need bad block on destination too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 				addr = r10_bio->devs[1].addr + sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 				ok = rdev_set_badblocks(rdev2, addr, s, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 				if (!ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 					/* just abort the recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 						  mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 					conf->mirrors[dw].recovery_disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 						= mddev->recovery_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 					set_bit(MD_RECOVERY_INTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 						&mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		sectors -= s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		sect += s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
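
/*
 * E.g. a 40-sector window on a 4KiB-page system is retried above in
 * five 8-sector (PAGE_SIZE >> 9) steps.  Each step reads one page from
 * the source slot and, on success, writes it to the destination slot;
 * a failure records a bad block on the failing device, and only an
 * unrecordable bad block on the destination aborts the recovery
 * entirely.
 */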
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	int d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	struct bio *wbio, *wbio2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		fix_recovery_read_error(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		end_sync_request(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	 * share the pages with the first bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	 * and submit the write request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	d = r10_bio->devs[1].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	wbio = r10_bio->devs[1].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	wbio2 = r10_bio->devs[1].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	/* Need to test wbio2->bi_end_io before we call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	 * submit_bio_noacct: if bi_end_io is NULL, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	 * submission path is free to free wbio2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (wbio2 && !wbio2->bi_end_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		wbio2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	if (wbio->bi_end_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		submit_bio_noacct(wbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	if (wbio2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		md_sync_acct(conf->mirrors[d].replacement->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			     bio_sectors(wbio2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		submit_bio_noacct(wbio2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)  * Used by fix_read_error() to decay the per rdev read_errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)  * We halve the read error count for every hour that has elapsed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)  * since the last recorded read error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	long cur_time_mon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	unsigned long hours_since_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	unsigned int read_errors = atomic_read(&rdev->read_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	cur_time_mon = ktime_get_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	if (rdev->last_read_error == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		/* first time we've seen a read error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		rdev->last_read_error = cur_time_mon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	hours_since_last = (long)(cur_time_mon -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			    rdev->last_read_error) / 3600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	rdev->last_read_error = cur_time_mon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 * if hours_since_last is >= the number of bits in read_errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	 * just set read errors to 0. We do this to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	 * overflowing the shift of read_errors by hours_since_last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	if (hours_since_last >= 8 * sizeof(read_errors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		atomic_set(&rdev->read_errors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
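/*
 * Illustrative sketch, not part of the driver: the decay rule above as a
 * standalone userspace program.  decay_read_errors() is a hypothetical
 * stand-in; the kernel keeps the count in an atomic_t and the timestamp
 * in seconds from ktime_get_seconds().
 */
#include <stdio.h>

static unsigned int decay_read_errors(unsigned int errors,
				      long last_error_time, long now)
{
	unsigned long hours = (unsigned long)(now - last_error_time) / 3600;

	/* shifting by >= the width of the type is undefined behaviour in
	 * C, so a long enough gap is treated as a full reset, exactly as
	 * the check above does */
	if (hours >= 8 * sizeof(errors))
		return 0;
	return errors >> hours;		/* halve once per elapsed hour */
}

int main(void)
{
	/* 40 errors recorded 3 hours ago decay to 40 >> 3 == 5 */
	printf("%u\n", decay_read_errors(40, 0, 3 * 3600));
	return 0;
}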
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 			    int sectors, struct page *page, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		/* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	if (rw == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		set_bit(WriteErrorSeen, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			set_bit(MD_RECOVERY_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 				&rdev->mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	/* need to record an error - either for the block or the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		md_error(rdev->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
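/*
 * Illustrative sketch, not part of the driver: the three-way return
 * convention of r10_sync_page_io() above, modelled with a hypothetical
 * stub.  -1 means the range is a known bad block and was skipped without
 * touching the device, 1 means the I/O succeeded, and 0 means it failed
 * and the error has already been recorded (bad block or device failure).
 */
#include <stdio.h>

static int sync_io_stub(int known_bad, int io_ok)
{
	if (known_bad)
		return -1;	/* skip: don't touch, don't complain */
	if (io_ok)
		return 1;	/* success */
	return 0;		/* failure, already recorded */
}

int main(void)
{
	switch (sync_io_stub(0, 1)) {
	case 1:
		puts("sector corrected");
		break;
	case 0:
		puts("error recorded, device may be failing");
		break;
	default:
		puts("known bad block, skipped");
	}
	return 0;
}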
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)  * This is a kernel thread which:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)  *	1.	Retries failed read operations on working mirrors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  *	2.	Updates the raid superblock when problems are encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)  *	3.	Performs writes following reads for array synchronising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	int sect = 0; /* Offset from r10_bio->sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	int sectors = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	int d = r10_bio->devs[r10_bio->read_slot].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	/* still own a reference to this rdev, so it cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	 * have been cleared recently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	if (test_bit(Faulty, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		/* drive has already been failed, just ignore any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		   more fix_read_error() attempts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	check_decay_read_errors(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	atomic_inc(&rdev->read_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	if (atomic_read(&rdev->read_errors) > max_read_errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		bdevname(rdev->bdev, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 			  mdname(mddev), b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 			  atomic_read(&rdev->read_errors), max_read_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		pr_notice("md/raid10:%s: %s: Failing raid device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 			  mdname(mddev), b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	while (sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		int s = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		int sl = r10_bio->read_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		int success = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		if (s > (PAGE_SIZE>>9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			s = PAGE_SIZE >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 			sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 			d = r10_bio->devs[sl].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 			rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 			if (rdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 			    test_bit(In_sync, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 			    !test_bit(Faulty, &rdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 					&first_bad, &bad_sectors) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 				atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 				success = sync_page_io(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 						       r10_bio->devs[sl].addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 						       sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 						       s<<9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 						       conf->tmppage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 						       REQ_OP_READ, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 				rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 				rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 				if (success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			sl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 			if (sl == conf->copies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 				sl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		} while (!success && sl != r10_bio->read_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		if (!success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 			/* Cannot read from anywhere, just mark the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 			 * as bad on the first device to discourage future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 			 * reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 			rdev = conf->mirrors[dn].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 			if (!rdev_set_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 				    rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 				    r10_bio->devs[r10_bio->read_slot].addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 				    + sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 				    s, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 				md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 				r10_bio->devs[r10_bio->read_slot].bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 					= IO_BLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		start = sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		/* write it back and re-read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		while (sl != r10_bio->read_slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 			char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 			if (sl == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 				sl = conf->copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			sl--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 			d = r10_bio->devs[sl].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 			rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 			if (!rdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 			    test_bit(Faulty, &rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 			    !test_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			if (r10_sync_page_io(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 					     r10_bio->devs[sl].addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 					     sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 					     s, conf->tmppage, WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 			    == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 				/* Well, this device is dead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 					  mdname(mddev), s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 					  (unsigned long long)(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 						  sect +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 						  choose_data_offset(r10_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 								     rdev)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 					  bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 				pr_notice("md/raid10:%s: %s: failing drive\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 					  mdname(mddev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 					  bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 			rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		sl = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		while (sl != r10_bio->read_slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 			char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 			if (sl == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 				sl = conf->copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			sl--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			d = r10_bio->devs[sl].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 			if (!rdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 			    test_bit(Faulty, &rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			    !test_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 			atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			switch (r10_sync_page_io(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 					     r10_bio->devs[sl].addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 					     sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 					     s, conf->tmppage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 						 READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 			case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 				/* Well, this device is dead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 				       mdname(mddev), s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 				       (unsigned long long)(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 					       sect +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 					       choose_data_offset(r10_bio, rdev)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 				       bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 				pr_notice("md/raid10:%s: %s: failing drive\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 				       mdname(mddev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 				       bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 			case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 				       mdname(mddev), s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 				       (unsigned long long)(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 					       sect +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 					       choose_data_offset(r10_bio, rdev)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 				       bdevname(rdev->bdev, b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 				atomic_add(s, &rdev->corrected_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 			rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		sectors -= s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		sect += s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
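/*
 * Illustrative sketch, not part of the driver: the circular slot walk
 * that fix_read_error() above uses to find a readable copy.  Starting
 * at the failed slot itself (a retried read may still succeed), every
 * copy is visited at most once before the walk returns to read_slot.
 */
#include <stdio.h>

int main(void)
{
	int copies = 4, read_slot = 2;
	int sl = read_slot;

	do {
		printf("try copy in slot %d\n", sl);	/* 2, 3, 0, 1 */
		sl++;
		if (sl == copies)
			sl = 0;
	} while (sl != read_slot);
	return 0;
}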
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) static int narrow_write_error(struct r10bio *r10_bio, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	struct bio *bio = r10_bio->master_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	struct mddev *mddev = r10_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	/* bio has the data to be written to slot 'i' where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	 * we just recently had a write error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	 * We repeatedly clone the bio and trim down to one block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	 * then try the write.  Where the write fails we record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	 * a bad block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	 * It is conceivable that the bio doesn't exactly align with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	 * blocks.  We must handle this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	 * We currently own a reference to the rdev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	int block_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	int sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	int sect_to_write = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	int ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	if (rdev->badblocks.shift < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	block_sectors = roundup(1 << rdev->badblocks.shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 				bdev_logical_block_size(rdev->bdev) >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	sector = r10_bio->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	sectors = ((r10_bio->sector + block_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		   & ~(sector_t)(block_sectors - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		- sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	while (sect_to_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		struct bio *wbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		sector_t wsector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		if (sectors > sect_to_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 			sectors = sect_to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		/* Write at 'sector' for 'sectors' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		wbio->bi_iter.bi_sector = wsector +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 				   choose_data_offset(r10_bio, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		bio_set_dev(wbio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		if (submit_bio_wait(wbio) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 			/* Failure! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 			ok = rdev_set_badblocks(rdev, wsector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 						sectors, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 				&& ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		bio_put(wbio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		sect_to_write -= sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		sector += sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		sectors = block_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	return ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
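/*
 * Illustrative sketch, not part of the driver: the first-chunk alignment
 * arithmetic used by narrow_write_error() above.  The first write is
 * trimmed so that it ends exactly on a badblocks-granularity boundary;
 * every later write is then a whole block.  The mask trick needs
 * block_sectors to be a power of two, which holds above because both
 * 1 << badblocks.shift and the logical block size are powers of two.
 * The values below are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 1030;	/* start of the failed range */
	unsigned long long block_sectors = 8;	/* badblocks granularity */

	/* sectors from 'sector' up to the next block_sectors boundary */
	unsigned long long first = ((sector + block_sectors) &
				    ~(block_sectors - 1)) - sector;

	/* prints: first write covers 2 sectors, ending at 1032 */
	printf("first write covers %llu sectors, ending at %llu\n",
	       first, sector + first);
	return 0;
}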
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	int slot = r10_bio->read_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	/* We got a read error. Maybe the drive is bad, maybe just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	 * the block, and we can fix it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	 * We freeze all other IO, and try reading the block from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	 * other devices.  When we find one, we re-write it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	 * and check whether that fixes the read error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	 * This is all done synchronously while the array is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	 * frozen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	bio = r10_bio->devs[slot].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	bio_put(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	r10_bio->devs[slot].bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	if (mddev->ro)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		r10_bio->devs[slot].bio = IO_BLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	else if (!test_bit(FailFast, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		freeze_array(conf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		fix_read_error(conf, mddev, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		unfreeze_array(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	r10_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	/* Some sort of write request has finished and it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	 * succeeded in writing where we thought there was a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	 * bad block.  So forget the bad block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	 * Or possibly it failed and we need to record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	 * a bad block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	int m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		for (m = 0; m < conf->copies; m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 			int dev = r10_bio->devs[m].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 			rdev = conf->mirrors[dev].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 			if (r10_bio->devs[m].bio == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 				r10_bio->devs[m].bio->bi_end_io == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 			if (!r10_bio->devs[m].bio->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 				rdev_clear_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 					rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 					r10_bio->devs[m].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 					r10_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 				if (!rdev_set_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 					    rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 					    r10_bio->devs[m].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 					    r10_bio->sectors, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 					md_error(conf->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 			rdev = conf->mirrors[dev].replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 			if (r10_bio->devs[m].repl_bio == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 			if (!r10_bio->devs[m].repl_bio->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 				rdev_clear_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 					rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 					r10_bio->devs[m].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 					r10_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 				if (!rdev_set_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 					    rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 					    r10_bio->devs[m].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 					    r10_bio->sectors, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 					md_error(conf->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		put_buf(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		bool fail = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		for (m = 0; m < conf->copies; m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 			int dev = r10_bio->devs[m].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 			struct bio *bio = r10_bio->devs[m].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 			rdev = conf->mirrors[dev].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 			if (bio == IO_MADE_GOOD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 				rdev_clear_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 					rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 					r10_bio->devs[m].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 					r10_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 				rdev_dec_pending(rdev, conf->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 			} else if (bio != NULL && bio->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 				fail = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 				if (!narrow_write_error(r10_bio, m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 					md_error(conf->mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 					set_bit(R10BIO_Degraded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 						&r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 				rdev_dec_pending(rdev, conf->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 			bio = r10_bio->devs[m].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 			rdev = conf->mirrors[dev].replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 			if (rdev && bio == IO_MADE_GOOD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 				rdev_clear_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 					rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 					r10_bio->devs[m].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 					r10_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 				rdev_dec_pending(rdev, conf->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		if (fail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 			spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 			conf->nr_queued++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 			spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 			 * In case freeze_array() is waiting for condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 			 * nr_pending == nr_queued + extra to be true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 			wake_up(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 			md_wakeup_thread(conf->mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 			if (test_bit(R10BIO_WriteError,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 				     &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 				close_write(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 			raid_end_bio_io(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) static void raid10d(struct md_thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	struct mddev *mddev = thread->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	struct r10bio *r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	struct list_head *head = &conf->retry_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	md_check_recovery(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	if (!list_empty_careful(&conf->bio_end_io_list) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		LIST_HEAD(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 			while (!list_empty(&conf->bio_end_io_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 				list_move(conf->bio_end_io_list.prev, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 				conf->nr_queued--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		while (!list_empty(&tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 			r10_bio = list_first_entry(&tmp, struct r10bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 						   retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 			list_del(&r10_bio->retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 			if (mddev->degraded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 				set_bit(R10BIO_Degraded, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			if (test_bit(R10BIO_WriteError,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 				     &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 				close_write(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 			raid_end_bio_io(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		flush_pending_writes(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		spin_lock_irqsave(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		if (list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 			spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		list_del(head->prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		conf->nr_queued--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		spin_unlock_irqrestore(&conf->device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 		mddev = r10_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		    test_bit(R10BIO_WriteError, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 			handle_write_completed(conf, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 			reshape_request_write(mddev, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 			sync_request_write(mddev, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 			recovery_request_write(mddev, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 			handle_read_error(mddev, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 			WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 			md_check_recovery(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
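/*
 * Illustrative sketch, not part of the driver: the drain pattern raid10d()
 * uses for conf->bio_end_io_list above.  Entries are spliced to a private
 * list while the lock is held, then processed with the lock dropped, so
 * the completion work never runs under the spinlock.  Plain C stand-ins
 * (node, pending, drain) for the kernel list and lock primitives.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;	/* stands in for conf->bio_end_io_list */

static void drain(void)
{
	struct node *local;

	pthread_mutex_lock(&lock);
	local = pending;	/* splice the whole list to a local head */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while (local) {		/* process outside the lock */
		struct node *n = local;

		local = n->next;
		printf("end io %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = pending;
		pending = n;
	}
	drain();
	return 0;
}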
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) static int init_resync(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	int ret, buffs, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	BUG_ON(mempool_initialized(&conf->r10buf_pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	conf->have_replacement = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	for (i = 0; i < conf->geo.raid_disks; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		if (conf->mirrors[i].replacement)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 			conf->have_replacement = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	ret = mempool_init(&conf->r10buf_pool, buffs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 			   r10buf_pool_alloc, r10buf_pool_free, conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	conf->next_resync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	struct rsync_pages *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	int nalloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		nalloc = conf->copies; /* resync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		nalloc = 2; /* recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	for (i = 0; i < nalloc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		bio = r10bio->devs[i].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		rp = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 		bio_reset(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		bio->bi_private = rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		bio = r10bio->devs[i].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		if (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 			rp = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 			bio_reset(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 			bio->bi_private = rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	return r10bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
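/*
 * Illustrative sketch, not part of the driver: the save/reset/restore
 * pattern in raid10_alloc_init_r10buf() above.  bio_reset() wipes the
 * whole bio, including ->bi_private, so the resync page bookkeeping
 * hanging off it must be saved across the reset.  fake_bio and
 * fake_bio_reset are hypothetical stand-ins.
 */
#include <stdio.h>
#include <string.h>

struct fake_bio {
	long sector;
	void *private;		/* stands in for bio->bi_private */
};

static void fake_bio_reset(struct fake_bio *b)
{
	memset(b, 0, sizeof(*b));	/* clears ->private too */
}

int main(void)
{
	int pages = 42;
	struct fake_bio b = { .sector = 7, .private = &pages };
	void *rp = b.private;		/* save */

	fake_bio_reset(&b);		/* reset wipes everything */
	b.private = rp;			/* restore */
	printf("%d\n", *(int *)b.private);	/* prints 42 */
	return 0;
}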
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)  * Set cluster_sync_high since we need other nodes to add the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)  * range [cluster_sync_low, cluster_sync_high] to suspend list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) static void raid10_set_cluster_sync_high(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	sector_t window_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	int extra_chunk, chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	 * First, here we define a "stripe" as a unit that spans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	 * all member devices once, so we get the number of chunks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	 * from raid_disks / near_copies. Otherwise, if near_copies is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	 * close to raid_disks, the resync window could increase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	 * linearly with the number of raid_disks, which means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	 * we would suspend a really large IO window when it is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	 * necessary. If raid_disks is not divisible by near_copies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	 * an extra chunk is needed to ensure the whole "stripe" is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	 * covered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	chunks = conf->geo.raid_disks / conf->geo.near_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		extra_chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		extra_chunk = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	 * At least use a 32M window to align with raid1's resync window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) }
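/*
 * Illustrative sketch, not part of the driver: the window sizing above
 * as plain arithmetic.  The input values are made up; the minimum of
 * 64 * 1024 sectors corresponds to the 32M window mentioned in the
 * comment, assuming 512-byte sectors.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long raid_disks = 5, near_copies = 2;
	unsigned long long chunk_sectors = 1024;	/* 512K chunks */
	unsigned long long min_window = 64ULL * 1024;	/* 32M in sectors */

	unsigned long long chunks = raid_disks / near_copies;
	/* one extra chunk when near_copies doesn't divide raid_disks */
	unsigned long long extra = (raid_disks % near_copies) ? 1 : 0;
	unsigned long long window = (chunks + extra) * chunk_sectors;

	if (window < min_window)	/* clamp, as the code above does */
		window = min_window;
	printf("window = %llu sectors\n", window);	/* 65536 here */
	return 0;
}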
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)  * perform a "sync" on one "block"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)  * We need to make sure that no normal I/O request - particularly write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)  * requests - conflict with active sync requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)  * This is achieved by tracking pending requests and a 'barrier' concept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)  * that can be installed to exclude normal IO requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)  * Resync and recovery are handled very differently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)  * For resync, we iterate over virtual addresses, read all copies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)  * and update if there are differences.  If only one copy is live,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)  * skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)  * For recovery, we iterate over physical addresses, read a good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)  * value for each non-in_sync drive, and over-write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)  * So, for recovery we may have several outstanding complex requests for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)  * given address, one for each out-of-sync device.  We model this by allocating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)  * a number of r10_bio structures, one for each out-of-sync device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)  * As we set up these structures, we collect all the bios together into a list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)  * which we then process collectively to add pages, and then process again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)  * to pass to submit_bio_noacct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)  * The r10_bio structures are linked using a borrowed master_bio pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)  * This link is counted in ->remaining.  When the r10_bio that points to NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)  * has its remaining count decremented to 0, the whole complex operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)  * is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 			     int *skipped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	struct r10bio *r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	struct bio *biolist = NULL, *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	sector_t max_sector, nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	int max_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	sector_t sync_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	sector_t sectors_skipped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	int chunks_skipped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	sector_t chunk_mask = conf->geo.chunk_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	int page_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	if (!mempool_initialized(&conf->r10buf_pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		if (init_resync(conf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	 * Allow skipping a full rebuild for incremental assembly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	 * of a clean array, like RAID1 does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	if (mddev->bitmap == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	    mddev->recovery_cp == MaxSector &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	    mddev->reshape_position == MaxSector &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	    conf->fullsync == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 		*skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		return mddev->dev_sectors - sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)  skipped:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	max_sector = mddev->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		max_sector = mddev->resync_max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	if (sector_nr >= max_sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 		conf->cluster_sync_low = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		conf->cluster_sync_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		/* If we aborted, we need to abort the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		 * sync on the 'current' bitmap chunks (there can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		 * be several when recovering multiple devices),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		 * as we may have started syncing them but not finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		 * We can find the current address in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		 * mddev->curr_resync, but for recovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		 * we need to convert that to several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		 * virtual addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 			end_reshape(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 			close_sync(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 		if (mddev->curr_resync < max_sector) { /* aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 				md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 						   &sync_blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 			else for (i = 0; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 				sector_t sect =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 					raid10_find_virt(conf, mddev->curr_resync, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 				md_bitmap_end_sync(mddev->bitmap, sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 						   &sync_blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 			/* completed sync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 			if ((!mddev->bitmap || conf->fullsync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 			    && conf->have_replacement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 				/* Completed a full sync so the replacements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 				 * are now fully recovered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 				rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 				for (i = 0; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 					struct md_rdev *rdev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 						rcu_dereference(conf->mirrors[i].replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 					if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 						rdev->recovery_offset = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 			conf->fullsync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		md_bitmap_close_sync(mddev->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 		close_sync(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		*skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		return sectors_skipped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		return reshape_request(mddev, sector_nr, skipped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	if (chunks_skipped >= conf->geo.raid_disks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		/* if there has been nothing to do on any drive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		 * then there is nothing to do at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 		*skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		return (max_sector - sector_nr) + sectors_skipped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	if (max_sector > mddev->resync_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	/* make sure whole request will fit in a chunk - if chunks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	 * are meaningful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	if (conf->geo.near_copies < conf->geo.raid_disks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	    max_sector > (sector_nr | chunk_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 		max_sector = (sector_nr | chunk_mask) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	 * If there is non-resync activity waiting for a turn, then let it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	 * through before starting on this new sync request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	if (conf->nr_waiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	/* Again, very different code for resync and recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	 * Both must result in an r10bio with a list of bios that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	 * have bi_end_io, bi_sector, bi_disk set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	 * and bi_private set to the r10bio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	 * For recovery, we may actually create several r10bios
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	 * with 2 bios in each, that correspond to the bios in the main one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	 * In this case, the subordinate r10bios link back through a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	 * borrowed master_bio pointer, and the counter in the master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	 * includes a ref from each subordinate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	/* First, we decide what to do and set ->bi_end_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	 * to end_sync_read if we want to read, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	 * to end_sync_write if we will want to write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	 */
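	/*
	 * Illustrative sketch (editorial note, not from the original
	 * source): if three r10bios are created in one recovery pass, the
	 * borrowed master_bio pointers form a chain from newest to oldest,
	 *
	 *   C ->master_bio --> B ->master_bio --> A ->master_bio --> NULL
	 *
	 * and each link holds one reference in its target's ->remaining
	 * counter, so A and B each carry a ref taken by their successor.
	 */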
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		/* recovery... the complicated one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 		r10_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		for (i = 0 ; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 			int still_degraded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 			struct r10bio *rb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 			sector_t sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 			int must_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 			int any_working;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 			int need_recover = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 			int need_replace = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 			struct raid10_info *mirror = &conf->mirrors[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 			struct md_rdev *mrdev, *mreplace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 			mrdev = rcu_dereference(mirror->rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 			mreplace = rcu_dereference(mirror->replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 			if (mrdev != NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 			    !test_bit(Faulty, &mrdev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 			    !test_bit(In_sync, &mrdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 				need_recover = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 			if (mreplace != NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 			    !test_bit(Faulty, &mreplace->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 				need_replace = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 			if (!need_recover && !need_replace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 			still_degraded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 			/* want to reconstruct this device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 			rb2 = r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 			sect = raid10_find_virt(conf, sector_nr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 			if (sect >= mddev->resync_max_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 				/* last stripe is not complete - don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 				 * try to recover this sector.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 			if (mreplace && test_bit(Faulty, &mreplace->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 				mreplace = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 			/* Unless we are doing a full sync or a replacement,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 			 * we only need to recover the block if it is set in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 			 * the bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 							 &sync_blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 			if (sync_blocks < max_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 				max_sync = sync_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 			if (!must_sync &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 			    mreplace == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 			    !conf->fullsync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 				/* yep, skip the sync_blocks here, but don't assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 				 * that there will never be anything to do here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 				chunks_skipped = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 			atomic_inc(&mrdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 			if (mreplace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 				atomic_inc(&mreplace->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 			r10_bio = raid10_alloc_init_r10buf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 			r10_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 			raise_barrier(conf, rb2 != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 			atomic_set(&r10_bio->remaining, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 			r10_bio->master_bio = (struct bio*)rb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 			if (rb2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 				atomic_inc(&rb2->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 			r10_bio->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 			set_bit(R10BIO_IsRecover, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 			r10_bio->sector = sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 			raid10_find_phys(conf, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 			/* Need to check if the array will still be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 			 * degraded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 			for (j = 0; j < conf->geo.raid_disks; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 				struct md_rdev *rdev = rcu_dereference(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 					conf->mirrors[j].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 					still_degraded = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 							 &sync_blocks, still_degraded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 			any_working = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 			for (j = 0; j < conf->copies; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 				int k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 				int d = r10_bio->devs[j].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 				sector_t from_addr, to_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 				struct md_rdev *rdev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 					rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 				sector_t sector, first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 				int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 				if (!rdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 				    !test_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 				/* This is where we read from */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 				any_working = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 				sector = r10_bio->devs[j].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 				if (is_badblock(rdev, sector, max_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 						&first_bad, &bad_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 					if (first_bad > sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 						max_sync = first_bad - sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 					else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 						bad_sectors -= (sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 								- first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 						if (max_sync > bad_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 							max_sync = bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 						continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 				bio = r10_bio->devs[0].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 				bio->bi_next = biolist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 				biolist = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 				bio->bi_end_io = end_sync_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 				bio_set_op_attrs(bio, REQ_OP_READ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 				if (test_bit(FailFast, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 					bio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 				from_addr = r10_bio->devs[j].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 				bio->bi_iter.bi_sector = from_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 					rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 				bio_set_dev(bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 				atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 				/* and we write to 'i' (if not in_sync) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 				for (k = 0; k < conf->copies; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 					if (r10_bio->devs[k].devnum == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 				BUG_ON(k == conf->copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 				to_addr = r10_bio->devs[k].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 				r10_bio->devs[0].devnum = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 				r10_bio->devs[0].addr = from_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 				r10_bio->devs[1].devnum = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 				r10_bio->devs[1].addr = to_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 				if (need_recover) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 					bio = r10_bio->devs[1].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 					bio->bi_next = biolist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 					biolist = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 					bio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 					bio->bi_iter.bi_sector = to_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 						+ mrdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 					bio_set_dev(bio, mrdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 					atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 				} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 					r10_bio->devs[1].bio->bi_end_io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 				/* and maybe write to replacement */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 				bio = r10_bio->devs[1].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 				if (bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 					bio->bi_end_io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 				/* Note: if need_replace, then bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 				 * cannot be NULL as r10buf_pool_alloc will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 				 * have allocated it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 				if (!need_replace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 				bio->bi_next = biolist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 				biolist = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 				bio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 				bio->bi_iter.bi_sector = to_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 					mreplace->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 				bio_set_dev(bio, mreplace->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 				atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 			if (j == conf->copies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 				/* Cannot recover, so abort the recovery or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 				 * record a bad block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 				if (any_working) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 					/* the problem is that there are bad blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 					 * on other device(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 					int k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 					for (k = 0; k < conf->copies; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 						if (r10_bio->devs[k].devnum == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 							break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 					if (!test_bit(In_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 						      &mrdev->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 					    && !rdev_set_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 						    mrdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 						    r10_bio->devs[k].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 						    max_sync, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 						any_working = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 					if (mreplace &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 					    !rdev_set_badblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 						    mreplace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 						    r10_bio->devs[k].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 						    max_sync, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 						any_working = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 				if (!any_working)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 					if (!test_and_set_bit(MD_RECOVERY_INTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 							      &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 						       mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 					mirror->recovery_disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 						= mddev->recovery_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 				put_buf(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 				if (rb2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 					atomic_dec(&rb2->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 				r10_bio = rb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 				rdev_dec_pending(mrdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 				if (mreplace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 					rdev_dec_pending(mreplace, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 			rdev_dec_pending(mrdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 			if (mreplace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 				rdev_dec_pending(mreplace, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 				/* Only want this if there is elsewhere to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 				 * read from. 'j' is currently the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 				 * readable copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 				int targets = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 				for (; j < conf->copies; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 					int d = r10_bio->devs[j].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 					if (conf->mirrors[d].rdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 					    test_bit(In_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 						      &conf->mirrors[d].rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 						targets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 				if (targets == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 					r10_bio->devs[0].bio->bi_opf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 						&= ~MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		if (biolist == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 			while (r10_bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 				struct r10bio *rb2 = r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 				r10_bio = (struct r10bio*) rb2->master_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 				rb2->master_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 				put_buf(rb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 			goto giveup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 		/* Resync: schedule a read for every block at this virtual offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 		int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 		 * curr_resync_completed may not be updated in time, yet we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 		 * will set cluster_sync_low based on it.  Check against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 		 * "sector_nr + 2 * RESYNC_SECTORS" for safety, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 		 * ensures curr_resync_completed gets updated in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 		 * md_bitmap_cond_end_sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 					mddev_is_clustered(mddev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 					  &sync_blocks, mddev->degraded) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 						 &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 			/* We can skip this block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 			*skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 			return sync_blocks + sectors_skipped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 		if (sync_blocks < max_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 			max_sync = sync_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		r10_bio = raid10_alloc_init_r10buf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		r10_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		r10_bio->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 		atomic_set(&r10_bio->remaining, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 		raise_barrier(conf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		conf->next_resync = sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		r10_bio->master_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		r10_bio->sector = sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		set_bit(R10BIO_IsSync, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		raid10_find_phys(conf, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		for (i = 0; i < conf->copies; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 			int d = r10_bio->devs[i].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 			sector_t first_bad, sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 			int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 			struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 			if (r10_bio->devs[i].repl_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 			bio = r10_bio->devs[i].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 			bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 			rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 			sector = r10_bio->devs[i].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 			if (is_badblock(rdev, sector, max_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 					&first_bad, &bad_sectors)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 				if (first_bad > sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 					max_sync = first_bad - sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 				else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 					bad_sectors -= (sector - first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 					if (max_sync > bad_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 						max_sync = bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 					rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 			atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 			atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 			bio->bi_next = biolist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 			biolist = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 			bio->bi_end_io = end_sync_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 			if (test_bit(FailFast, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 				bio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 			bio_set_dev(bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 			rdev = rcu_dereference(conf->mirrors[d].replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 			atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 			/* Need to set up for writing to the replacement */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 			bio = r10_bio->devs[i].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 			bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 			sector = r10_bio->devs[i].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 			bio->bi_next = biolist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 			biolist = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 			bio->bi_end_io = end_sync_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 			if (test_bit(FailFast, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 				bio->bi_opf |= MD_FAILFAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 			bio_set_dev(bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 		if (count < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 			for (i = 0; i < conf->copies; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 				int d = r10_bio->devs[i].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 				if (r10_bio->devs[i].bio->bi_end_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 					rdev_dec_pending(conf->mirrors[d].rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 							 mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 				if (r10_bio->devs[i].repl_bio &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 				    r10_bio->devs[i].repl_bio->bi_end_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 					rdev_dec_pending(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 						conf->mirrors[d].replacement,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 						mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 			put_buf(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 			biolist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 			goto giveup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	nr_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	if (sector_nr + max_sync < max_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		max_sector = sector_nr + max_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		int len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 		if (sector_nr + (len>>9) > max_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 			len = (max_sector - sector_nr) << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 		if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 		for (bio = biolist; bio; bio = bio->bi_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 			struct resync_pages *rp = get_resync_pages(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 			page = resync_fetch_page(rp, page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 			 * won't fail because the vec table is big enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 			 * to hold all these pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 			bio_add_page(bio, page, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 		nr_sectors += len>>9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 		sector_nr += len>>9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	} while (++page_idx < RESYNC_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	r10_bio->sectors = nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	if (mddev_is_clustered(mddev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		/* It is resync, not recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 			conf->cluster_sync_low = mddev->curr_resync_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 			raid10_set_cluster_sync_high(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 			/* Send resync message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 			md_cluster_ops->resync_info_update(mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 						conf->cluster_sync_low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 						conf->cluster_sync_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	} else if (mddev_is_clustered(mddev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 		/* This is recovery, not resync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 		sector_t sect_va1, sect_va2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 		bool broadcast_msg = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		for (i = 0; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 			 * sector_nr is a device address for recovery, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 			 * need to translate it to an array address before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 			 * comparing with cluster_sync_high.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 			sect_va1 = raid10_find_virt(conf, sector_nr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 				broadcast_msg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 				 * curr_resync_completed is, like sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 				 * a device address, so translate it too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 				sect_va2 = raid10_find_virt(conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 					mddev->curr_resync_completed, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 				if (conf->cluster_sync_low == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 				    conf->cluster_sync_low > sect_va2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 					conf->cluster_sync_low = sect_va2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		if (broadcast_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 			raid10_set_cluster_sync_high(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 			md_cluster_ops->resync_info_update(mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 						conf->cluster_sync_low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 						conf->cluster_sync_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	while (biolist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		bio = biolist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		biolist = biolist->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		r10_bio = get_resync_r10bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 		r10_bio->sectors = nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 		if (bio->bi_end_io == end_sync_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 			md_sync_acct_bio(bio, nr_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 			bio->bi_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 			submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	if (sectors_skipped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 		/* pretend they weren't skipped; it makes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 		 * no important difference in this case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 		md_done_sync(mddev, sectors_skipped, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	return sectors_skipped + nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494)  giveup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	/* There is nowhere to write, so all non-sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	 * drives must be failed or in resync, or all drives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	 * have a bad block; try the next chunk...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	if (sector_nr + max_sync < max_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 		max_sector = sector_nr + max_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	sectors_skipped += (max_sector - sector_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	chunks_skipped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	sector_nr = max_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	goto skipped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) static sector_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	sector_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	if (!raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		raid_disks = min(conf->geo.raid_disks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 				 conf->prev.raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	if (!sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 		sectors = conf->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	size = sectors >> conf->geo.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	sector_div(size, conf->geo.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	size = size * raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	sector_div(size, conf->geo.near_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	return size << conf->geo.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
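/*
 * Worked example for raid10_size() (editorial; the geometry is assumed
 * purely for illustration): raid_disks = 4, near_copies = 2,
 * far_copies = 1, chunk_shift = 7 (64 KiB chunks) and
 * sectors = 1048576 per device (512 MiB):
 *
 *   size = 1048576 >> 7       =  8192 chunks per device
 *   size /= far_copies  (1)   =  8192
 *   size *= raid_disks  (4)   = 32768
 *   size /= near_copies (2)   = 16384 data chunks in the array
 *   return 16384 << 7         = 2097152 sectors, i.e. 1 GiB usable
 *                               out of 2 GiB of raw capacity.
 */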
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) static void calc_sectors(struct r10conf *conf, sector_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	/* Calculate the number of sectors-per-device that will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	 * actually be used, and set conf->dev_sectors and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	 * conf->stride
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	size = size >> conf->geo.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 	sector_div(size, conf->geo.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	size = size * conf->geo.raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 	sector_div(size, conf->geo.near_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	/* 'size' is now the number of chunks in the array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	/* calculate "used chunks per device" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	size = size * conf->copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	/* We need to round up when dividing by raid_disks to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	 * get the stride size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	conf->dev_sectors = size << conf->geo.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	if (conf->geo.far_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		conf->geo.stride = 1 << conf->geo.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 		sector_div(size, conf->geo.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 		conf->geo.stride = size << conf->geo.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
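/*
 * Worked example for calc_sectors() (editorial; same assumed geometry
 * as above): size = 1048576 sectors per device, raid_disks = 4,
 * near_copies = 2, far_copies = 1, copies = 2, chunk_shift = 7:
 *
 *   size = 1048576 >> 7             =  8192
 *   size /= far_copies (1)          =  8192
 *   size *= raid_disks (4)          = 32768
 *   size /= near_copies (2)         = 16384 chunks in the array
 *   size *= copies (2)              = 32768 used chunks in total
 *   size = DIV_ROUND_UP(32768, 4)   =  8192 used chunks per device
 *   dev_sectors = 8192 << 7         = 1048576 (every sector used)
 *   far_offset == 0, so
 *   stride = (8192 / 1) << 7        = 1048576 sectors
 */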
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) enum geo_type {geo_new, geo_old, geo_start};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	int nc, fc, fo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	int layout, chunk, disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	switch (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	case geo_old:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 		layout = mddev->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 		chunk = mddev->chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 		disks = mddev->raid_disks - mddev->delta_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	case geo_new:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 		layout = mddev->new_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 		chunk = mddev->new_chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 		disks = mddev->raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	default: /* avoid 'may be unused' warnings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	case geo_start: /* new when starting reshape - raid_disks not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 			 * updated yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		layout = mddev->new_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		chunk = mddev->new_chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		disks = mddev->raid_disks + mddev->delta_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	if (layout >> 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	if (chunk < (PAGE_SIZE >> 9) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	    !is_power_of_2(chunk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 		return -2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	nc = layout & 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	fc = (layout >> 8) & 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	fo = layout & (1<<16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	geo->raid_disks = disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	geo->near_copies = nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	geo->far_copies = fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	geo->far_offset = fo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	switch (layout >> 17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	case 0:	/* original layout.  simple but not always optimal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		geo->far_set_size = disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 		 * actually using this, but leave code here just in case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 		geo->far_set_size = disks/fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		WARN(geo->far_set_size < fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	case 2: /* "improved" layout fixed to match documentation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 		geo->far_set_size = fc * nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 	default: /* Not a valid layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 	geo->chunk_mask = chunk - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	geo->chunk_shift = ffz(~chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	return nc*fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) }
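/*
 * Example (editorial): the common "near 2" layout created by
 * "mdadm --level=10 --layout=n2" encodes as layout 0x102, so
 *
 *   nc = 0x102 & 255          = 2   near copies
 *   fc = (0x102 >> 8) & 255   = 1   far section
 *   fo = 0x102 & (1 << 16)    = 0   no far offset
 *   0x102 >> 17               = 0   original layout, far_set_size = disks
 *
 * and setup_geo() returns nc * fc = 2, the total number of copies.
 */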
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) static struct r10conf *setup_conf(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	struct r10conf *conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	struct geom geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	int copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	copies = setup_geo(&geo, mddev, geo_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	if (copies == -2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 			mdname(mddev), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	if (copies < 2 || copies > mddev->raid_disks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 			mdname(mddev), mddev->new_layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	if (!conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	/* FIXME calc properly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 				sizeof(struct raid10_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 	if (!conf->mirrors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	conf->tmppage = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	if (!conf->tmppage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	conf->geo = geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	conf->copies = copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 			   rbio_pool_free, conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	calc_sectors(conf, mddev->dev_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	if (mddev->reshape_position == MaxSector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 		conf->prev = conf->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 		conf->reshape_progress = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 		conf->reshape_progress = mddev->reshape_position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 		if (conf->prev.far_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 			conf->prev.stride = 1 << conf->prev.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 			/* far_copies must be 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 			conf->prev.stride = conf->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	conf->reshape_safe = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	spin_lock_init(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 	INIT_LIST_HEAD(&conf->retry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	INIT_LIST_HEAD(&conf->bio_end_io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	spin_lock_init(&conf->resync_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	init_waitqueue_head(&conf->wait_barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	atomic_set(&conf->nr_pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	conf->thread = md_register_thread(raid10d, mddev, "raid10");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	if (!conf->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	conf->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	return conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	if (conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 		mempool_exit(&conf->r10bio_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 		kfree(conf->mirrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		safe_put_page(conf->tmppage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 		bioset_exit(&conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		kfree(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) static void raid10_set_io_opt(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	int raid_disks = conf->geo.raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	if (!(conf->geo.raid_disks % conf->geo.near_copies))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 		raid_disks /= conf->geo.near_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 			 raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) }
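/*
 * Example (editorial, assumed geometry): raid_disks = 4,
 * near_copies = 2, chunk_sectors = 1024 (512 KiB chunks).  raid_disks
 * divides evenly by near_copies, so io_opt becomes
 * (1024 << 9) * (4 / 2) = 1 MiB: one chunk from each of the two
 * independent mirror pairs.
 */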
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) static int raid10_run(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	struct r10conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	int i, disk_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	struct raid10_info *disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	sector_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	sector_t min_offset_diff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	int first = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	bool discard_supported = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	if (mddev_init_writes_pending(mddev) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	if (mddev->private == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 		conf = setup_conf(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 		if (IS_ERR(conf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 			return PTR_ERR(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		mddev->private = conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	if (!conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	if (mddev_is_clustered(conf->mddev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 		int fc, fo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		fc = (mddev->layout >> 8) & 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 		fo = mddev->layout & (1<<16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		if (fc > 1 || fo > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 			pr_err("only near layout is supported by clustered raid10\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 			goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	mddev->thread = conf->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 	conf->thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 	if (mddev->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 		blk_queue_max_discard_sectors(mddev->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 					      mddev->chunk_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 		blk_queue_max_write_same_sectors(mddev->queue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		raid10_set_io_opt(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 	rdev_for_each(rdev, mddev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 		long long diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		disk_idx = rdev->raid_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 		if (disk_idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		if (disk_idx >= conf->geo.raid_disks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		    disk_idx >= conf->prev.raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		disk = conf->mirrors + disk_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 		if (test_bit(Replacement, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			if (disk->replacement)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 				goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 			disk->replacement = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 			if (disk->rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 				goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 			disk->rdev = rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		diff = (rdev->new_data_offset - rdev->data_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 		if (!mddev->reshape_backwards)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 			diff = -diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 		if (diff < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 			diff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 		if (first || diff < min_offset_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 			min_offset_diff = diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 		if (mddev->gendisk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 			disk_stack_limits(mddev->gendisk, rdev->bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 					  rdev->data_offset << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		disk->head_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 			discard_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	if (mddev->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 		if (discard_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 						mddev->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 						  mddev->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 	/* need to check that every block has at least one working mirror */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 	if (!enough(conf, -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 		pr_err("md/raid10:%s: not enough operational mirrors.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 		       mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 		goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	if (conf->reshape_progress != MaxSector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 		/* must ensure that shape change is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 		if (conf->geo.far_copies != 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 		    conf->geo.far_offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 			goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 		if (conf->prev.far_copies != 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 		    conf->prev.far_offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 			goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	mddev->degraded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	for (i = 0; i < conf->geo.raid_disks || i < conf->prev.raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 	     i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 		disk = conf->mirrors + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 		if (!disk->rdev && disk->replacement) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 			/* The replacement is all we have - use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 			disk->rdev = disk->replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 			disk->replacement = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 			clear_bit(Replacement, &disk->rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 		if (!disk->rdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 		    !test_bit(In_sync, &disk->rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 			disk->head_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 			mddev->degraded++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 			if (disk->rdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 			    disk->rdev->saved_raid_disk < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 				conf->fullsync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 		if (disk->replacement &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 		    !test_bit(In_sync, &disk->replacement->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 		    disk->replacement->saved_raid_disk < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 			conf->fullsync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 		disk->recovery_disabled = mddev->recovery_disabled - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 	if (mddev->recovery_cp != MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 			  mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	pr_info("md/raid10:%s: active with %d out of %d devices\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 		conf->geo.raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 	 * Ok, everything is just fine now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 	mddev->dev_sectors = conf->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	size = raid10_size(mddev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	md_set_array_sectors(mddev, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	mddev->resync_max_sectors = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 	if (md_integrity_register(mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 	if (conf->reshape_progress != MaxSector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 		unsigned long before_length, after_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 		before_length = ((1 << conf->prev.chunk_shift) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 				 conf->prev.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 		after_length = ((1 << conf->geo.chunk_shift) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 				conf->geo.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 		if (max(before_length, after_length) > min_offset_diff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 			/* This cannot work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 			goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 		conf->offset_diff = min_offset_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 							"reshape");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 		if (!mddev->sync_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 			goto out_free_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) out_free_conf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 	md_unregister_thread(&mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	mempool_exit(&conf->r10bio_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	safe_put_page(conf->tmppage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 	kfree(conf->mirrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 	kfree(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 	mddev->private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) static void raid10_free(struct mddev *mddev, void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	struct r10conf *conf = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 	mempool_exit(&conf->r10bio_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	safe_put_page(conf->tmppage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 	kfree(conf->mirrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	kfree(conf->mirrors_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	kfree(conf->mirrors_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	bioset_exit(&conf->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 	kfree(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) static void raid10_quiesce(struct mddev *mddev, int quiesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 	if (quiesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 		raise_barrier(conf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 		lower_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) static int raid10_resize(struct mddev *mddev, sector_t sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 	/* Resize of 'far' arrays is not supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	 * For 'near' and 'offset' arrays we can set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 	 * number of sectors used to be an appropriate multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	 * of the chunk size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 	 * For 'offset', this is far_copies*chunksize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	 * For 'near' the multiplier is the LCM of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	 * near_copies and raid_disks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 	 * So if far_copies > 1 && !far_offset, fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	 * Else find LCM(raid_disks, near_copies)*far_copies and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	 * multiply by chunk_size.  Then round to this number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	 * This is mostly done by raid10_size()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	 */
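	/*
	 * Editorial worked example (illustrative numbers, not from the
	 * original source): a 'near' array with raid_disks=4, near_copies=2,
	 * far_copies=1 and 512-sector chunks rounds to a multiple of
	 * LCM(4, 2) * 1 * 512 = 2048 sectors, so a requested size of
	 * 10000 sectors would be trimmed down to 8192.
	 */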
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	sector_t oldsize, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	if (mddev->reshape_position != MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 	oldsize = raid10_size(mddev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	size = raid10_size(mddev, sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	if (mddev->external_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	    mddev->array_sectors > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 	if (mddev->bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 		int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	md_set_array_sectors(mddev, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	if (sectors > mddev->dev_sectors &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	    mddev->recovery_cp > oldsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 		mddev->recovery_cp = oldsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	calc_sectors(conf, sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	mddev->dev_sectors = conf->dev_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	mddev->resync_max_sectors = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 	struct r10conf *conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	if (mddev->degraded > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 			mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 	sector_div(size, devs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 	/* Set new parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 	mddev->new_level = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 	/* new layout: far_copies = 1, near_copies = 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	mddev->new_layout = (1<<8) + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 	mddev->new_chunk_sectors = mddev->chunk_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	mddev->delta_disks = mddev->raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 	mddev->raid_disks *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 	/* make sure it will not be marked as dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	mddev->recovery_cp = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 	mddev->dev_sectors = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	conf = setup_conf(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 	if (!IS_ERR(conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 		rdev_for_each(rdev, mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 			if (rdev->raid_disk >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 				rdev->new_raid_disk = rdev->raid_disk * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 				rdev->sectors = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 		conf->barrier = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 	return conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) static void *raid10_takeover(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 	struct r0conf *raid0_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 	/* raid10 can take over:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	 *  raid0 - providing it has only a single zone (all drives the same size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	if (mddev->level == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 		/* for raid0 takeover only one zone is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 		raid0_conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 		if (raid0_conf->nr_strip_zones > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 				mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 			return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 		return raid10_takeover_raid0(mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 			raid0_conf->strip_zone->zone_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 			raid0_conf->strip_zone->nb_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) }
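/*
 * Editorial usage sketch (assumed mdadm invocations, not part of this
 * file): a single-zone raid0 reaches this takeover path via a level
 * change, after which the array can be made redundant, e.g.:
 *
 *   mdadm --grow /dev/md0 --level=10
 *   mdadm /dev/md0 --add /dev/sdc /dev/sdd
 *   mdadm --grow /dev/md0 --raid-devices=4
 */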
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) static int raid10_check_reshape(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 	/* Called when there is a request to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 	 * - layout (to ->new_layout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	 * - chunk size (to ->new_chunk_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	 * - raid_disks (by delta_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	 * or when trying to restart a reshape that was ongoing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 	 * We need to validate the request and possibly allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	 * space if that might be an issue later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	 * Currently we reject any reshape of a 'far' mode array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	 * allow chunk size to change if new is generally acceptable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	 * allow raid_disks to increase, and allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	 * a switch between 'near' mode and 'offset' mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	struct geom geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 		/* mustn't change number of copies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	if (geo.far_copies > 1 && !geo.far_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 		/* Cannot switch to 'far' mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 	if (mddev->array_sectors & geo.chunk_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 		/* not factor of array size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	if (!enough(conf, -1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 	kfree(conf->mirrors_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 	conf->mirrors_new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 	if (mddev->delta_disks > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 		/* allocate new 'mirrors' list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 		conf->mirrors_new =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 			kcalloc(mddev->raid_disks + mddev->delta_disks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 				sizeof(struct raid10_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 		if (!conf->mirrors_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095)  * Need to check if array has failed when deciding whether to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096)  *  - start an array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097)  *  - remove non-faulty devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098)  *  - add a spare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)  *  - allow a reshape
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)  * This determination is simple when no reshape is happening.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)  * However if there is a reshape, we need to carefully check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102)  * both the before and after sections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103)  * This is because some failed devices may only affect one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104)  * of the two sections, and some non-in_sync devices may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)  * be in_sync in the section most affected by failed devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) static int calc_degraded(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	int degraded, degraded2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 	degraded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	/* 'prev' section first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	for (i = 0; i < conf->prev.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 		if (!rdev || test_bit(Faulty, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 			degraded++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 		else if (!test_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 			/* If we ever support reducing the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 			 * devices in an array, this might not need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 			 * contribute to 'degraded'.  For now it does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 			degraded++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 	if (conf->geo.raid_disks == conf->prev.raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 		return degraded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 	degraded2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 	for (i = 0; i < conf->geo.raid_disks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 		if (!rdev || test_bit(Faulty, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 			degraded2++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 		else if (!test_bit(In_sync, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 			/* If reshape is increasing the number of devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 			 * this section has already been recovered, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 			 * it doesn't contribute to degraded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 			 * Otherwise it does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 			if (conf->geo.raid_disks <= conf->prev.raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 				degraded2++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	if (degraded2 > degraded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 		return degraded2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	return degraded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) }
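/*
 * Editorial example (illustrative): growing from 4 to 6 devices with one
 * failed original member and spares already added to the two new slots:
 * the failed member counts in both sections, while the new, still
 * !In_sync slots are skipped in the second loop because
 * geo.raid_disks > prev.raid_disks, so calc_degraded() returns 1.
 */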
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) static int raid10_start_reshape(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	/* A 'reshape' has been requested. This commits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 	 * This also checks if there are enough spares and adds them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	 * to the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	 * We currently require enough spares to make the final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	 * array non-degraded.  We also require that the difference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	 * between old and new data_offset - on each device - is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	 * enough that we never risk over-writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	 */
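	/*
	 * Editorial example (illustrative numbers): with 1024-sector chunks
	 * and far_copies=1 in both the old and new geometry,
	 * before_length = after_length = 1024, so every member device needs
	 * at least 1024 sectors between its old and new data_offset for the
	 * checks below to accept the reshape.
	 */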
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	unsigned long before_length, after_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	sector_t min_offset_diff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 	int first = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 	struct geom new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	int spares = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	if (setup_geo(&new, mddev, geo_start) != conf->copies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	before_length = ((1 << conf->prev.chunk_shift) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 			 conf->prev.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	after_length = ((1 << conf->geo.chunk_shift) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 			conf->geo.far_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	rdev_for_each(rdev, mddev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 		if (!test_bit(In_sync, &rdev->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 		    && !test_bit(Faulty, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 			spares++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 		if (rdev->raid_disk >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 			long long diff = (rdev->new_data_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 					  - rdev->data_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 			if (!mddev->reshape_backwards)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 				diff = -diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 			if (diff < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 				diff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 			if (first || diff < min_offset_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 				min_offset_diff = diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 			first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 	if (max(before_length, after_length) > min_offset_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 	if (spares < mddev->delta_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 	conf->offset_diff = min_offset_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 	if (conf->mirrors_new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 		memcpy(conf->mirrors_new, conf->mirrors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 		       sizeof(struct raid10_info)*conf->prev.raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 		smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 		kfree(conf->mirrors_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 		conf->mirrors_old = conf->mirrors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 		conf->mirrors = conf->mirrors_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 		conf->mirrors_new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	setup_geo(&conf->geo, mddev, geo_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 	if (mddev->reshape_backwards) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 		sector_t size = raid10_size(mddev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 		if (size < mddev->array_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 			spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 				mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		mddev->resync_max_sectors = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 		conf->reshape_progress = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 		conf->reshape_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 	conf->reshape_safe = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 	spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	if (mddev->delta_disks && mddev->bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 		struct mdp_superblock_1 *sb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 		sector_t oldsize, newsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 		oldsize = raid10_size(mddev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 		if (!mddev_is_clustered(mddev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 				goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 		rdev_for_each(rdev, mddev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 			if (rdev->raid_disk > -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 			    !test_bit(Faulty, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 				sb = page_address(rdev->sb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 		 * Some node is already performing a reshape, so there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 		 * need to call md_bitmap_resize again; it will be called on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 		 * receipt of the BITMAP_RESIZE msg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 		if ((sb && (le32_to_cpu(sb->feature_map) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 	if (mddev->delta_disks > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 		rdev_for_each(rdev, mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 			if (rdev->raid_disk < 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 			    !test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 				if (raid10_add_disk(mddev, rdev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 					if (rdev->raid_disk >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 					    conf->prev.raid_disks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 						set_bit(In_sync, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 					else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 						rdev->recovery_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 					/* Failure here is OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 					sysfs_link_rdev(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 			} else if (rdev->raid_disk >= conf->prev.raid_disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 				   && !test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 				/* This is a spare that was manually added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 				set_bit(In_sync, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 	/* When a reshape changes the number of devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 	 * ->degraded is measured against the larger of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	 * pre and post numbers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 	mddev->degraded = calc_degraded(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	mddev->raid_disks = conf->geo.raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 	mddev->reshape_position = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 						"reshape");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 	if (!mddev->sync_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 		goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 	conf->reshape_checkpoint = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	md_wakeup_thread(mddev->sync_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 	md_new_event(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 	mddev->recovery = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	conf->geo = conf->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 	mddev->raid_disks = conf->geo.raid_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 	rdev_for_each(rdev, mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 		rdev->new_data_offset = rdev->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 	conf->reshape_progress = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 	conf->reshape_safe = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	mddev->reshape_position = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) /* Calculate the last device-address that could contain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339)  * any block from the chunk that includes the array-address 's'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340)  * and report the next address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341)  * i.e. the address returned will be chunk-aligned and after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342)  * any data that is in the chunk containing 's'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) static sector_t last_dev_address(sector_t s, struct geom *geo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 	s = (s | geo->chunk_mask) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	s >>= geo->chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	s *= geo->near_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 	s *= geo->far_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 	s <<= geo->chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 	return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) }
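/*
 * Editorial trace (illustrative geometry): chunk_shift=10 (1024-sector
 * chunks), near_copies=2, raid_disks=4, far_copies=1, s=5000:
 *   (5000 | 1023) + 1 = 5120; >> 10 = 5; * 2 = 10;
 *   DIV_ROUND_UP(10, 4) = 3; * 1 = 3; << 10 = 3072,
 * the first chunk-aligned device address past any data from the chunk
 * containing array sector 5000.
 */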
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) /* Calculate the first device-address that could contain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356)  * any block from the chunk that includes the array-address 's'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357)  * This too will be the start of a chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) static sector_t first_dev_address(sector_t s, struct geom *geo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	s >>= geo->chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 	s *= geo->near_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 	sector_div(s, geo->raid_disks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 	s *= geo->far_copies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	s <<= geo->chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 	return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) }
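/*
 * Editorial trace (same illustrative geometry as above), s=5000:
 *   5000 >> 10 = 4; * 2 = 8; / 4 = 2; * 1 = 2; << 10 = 2048,
 * the chunk-aligned device address at or before any data from that chunk.
 */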
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 				int *skipped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	/* We simply copy at most one chunk (smallest of old and new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 	 * at a time, possibly less if that exceeds RESYNC_PAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 	 * or we hit a bad block or something.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 	 * This might mean we pause for normal IO in the middle of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 	 * a chunk, but that is not a problem as mddev->reshape_position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 	 * can record any location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 	 * If we will want to write to a location that isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 	 * yet recorded as 'safe' (i.e. in metadata on disk) then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 	 * we need to flush all reshape requests and update the metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	 * When reshaping forwards (e.g. to more devices), we interpret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	 * 'safe' as the earliest block which might not have been copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 	 * down yet.  We divide this by previous stripe size and multiply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	 * by previous stripe length to get lowest device offset that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 	 * cannot write to yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 	 * We interpret 'sector_nr' as an address that we want to write to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 	 * From this we use last_dev_address() to find where we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 	 * write to, and first_dev_address() on the 'safe' position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	 * If this 'next' write position is after the 'safe' position,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 	 * we must update the metadata to increase the 'safe' position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 	 * When reshaping backwards, we round in the opposite direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 	 * and perform the reverse test:  next write position must not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 	 * less than current safe position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	 * In all this the minimum difference in data offsets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 	 * (conf->offset_diff - always positive) allows a bit of slack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 	 * so next can be after 'safe', but not by more than offset_diff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 	 * We need to prepare all the bios here before we start any IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 	 * to ensure the size we choose is acceptable to all devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 	 * That means one for each copy for write-out and an extra one for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 	 * read-in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 	 * We store the read-in bio in ->master_bio and the others in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 	 * ->devs[x].bio and ->devs[x].repl_bio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 	 */
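	/*
	 * Editorial example (illustrative numbers): reshaping forwards with
	 * offset_diff=2048, a next write position of 10240 against a safe
	 * position of 8192 sits exactly on the slack limit
	 * (10240 == 8192 + 2048), so no metadata flush is needed yet; one
	 * sector further and the metadata must be updated first.
	 */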
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 	struct r10bio *r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 	sector_t next, safe, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 	int max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 	int nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 	int s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 	int need_flush = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 	struct bio *blist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 	struct bio *bio, *read_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 	int sectors_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	if (sector_nr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 		/* If restarting in the middle, skip the initial sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 		if (mddev->reshape_backwards &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 			sector_nr = (raid10_size(mddev, 0, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 				     - conf->reshape_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 		} else if (!mddev->reshape_backwards &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 			   conf->reshape_progress > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 			sector_nr = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 		if (sector_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 			mddev->curr_resync_completed = sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 			*skipped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 			return sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 	/* We don't use sector_nr to track where we are up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	 * as that doesn't work well for ->reshape_backwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	 * So just use ->reshape_progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 	if (mddev->reshape_backwards) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 		/* 'next' is the earliest device address that we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 		 * write to for this chunk in the new layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 		next = first_dev_address(conf->reshape_progress - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 					 &conf->geo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 		/* 'safe' is the last device address that we might read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 		 * in the old layout after a restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 		safe = last_dev_address(conf->reshape_safe - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 					&conf->prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 		if (next + conf->offset_diff < safe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 			need_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 		last = conf->reshape_progress - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 					       & conf->prev.chunk_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 		if (sector_nr + RESYNC_SECTORS < last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 			sector_nr = last + 1 - RESYNC_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 		/* 'next' is after the last device address that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 		 * might write to for this chunk in the new layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 		next = last_dev_address(conf->reshape_progress, &conf->geo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 		/* 'safe' is the earliest device address that we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 		 * read from in the old layout after a restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 		safe = first_dev_address(conf->reshape_safe, &conf->prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 		/* Need to update metadata if 'next' might be beyond 'safe'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 		 * as that would possibly corrupt data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 		if (next > safe + conf->offset_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 			need_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 		sector_nr = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 		last  = sector_nr | (conf->geo.chunk_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 				     & conf->prev.chunk_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 		if (sector_nr + RESYNC_SECTORS <= last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 			last = sector_nr + RESYNC_SECTORS - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 	if (need_flush ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 		/* Need to update reshape_position in metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 		wait_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 		mddev->reshape_position = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 		if (mddev->reshape_backwards)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 				- conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 			mddev->curr_resync_completed = conf->reshape_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 		conf->reshape_checkpoint = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 		md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 			allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 			return sectors_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 		conf->reshape_safe = mddev->reshape_position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 		allow_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 	raise_barrier(conf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) read_more:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 	/* Now schedule reads for blocks from sector_nr to last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 	r10_bio = raid10_alloc_init_r10buf(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 	r10_bio->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 	raise_barrier(conf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 	atomic_set(&r10_bio->remaining, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 	r10_bio->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 	r10_bio->sector = sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 	set_bit(R10BIO_IsReshape, &r10_bio->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 	r10_bio->sectors = last - sector_nr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 	rdev = read_balance(conf, r10_bio, &max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 	if (!rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 		/* Cannot read from here, so need to record bad blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 		 * on all the target devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 		// FIXME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 		mempool_free(r10_bio, &conf->r10buf_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 		return sectors_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	bio_set_dev(read_bio, rdev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 			       + rdev->data_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 	read_bio->bi_private = r10_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 	read_bio->bi_end_io = end_reshape_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
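	/* Open-coded partial bio reset: clear the resettable flag bits and
	 * the per-I/O status, vec count and size before reuse. */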
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 	read_bio->bi_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 	read_bio->bi_vcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 	read_bio->bi_iter.bi_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 	r10_bio->master_bio = read_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 	 * Broadcast a RESYNC message to the other nodes so that none of them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 	 * writes to this region while it is being reshaped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 		struct mdp_superblock_1 *sb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 		int sb_reshape_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 		conf->cluster_sync_low = sector_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 		sb = page_address(rdev->sb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 		if (sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 			 * Pull cluster_sync_low back to the superblock's reshape
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 			 * position if that is lower: cluster_sync_low must not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 			 * advance past a reshape that has not yet completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 			if (sb_reshape_pos < conf->cluster_sync_low)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 				conf->cluster_sync_low = sb_reshape_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 							  conf->cluster_sync_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 	/* Now find the locations in the new layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 	__raid10_find_phys(&conf->geo, r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 	blist = read_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	read_bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 
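	/*
	 * Set up a write to every copy in the new layout: even values of s
	 * address slot s/2's primary rdev, odd values its replacement, so
	 * each slot contributes up to two write targets.
	 */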
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 	for (s = 0; s < conf->copies*2; s++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 		struct bio *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 		int d = r10_bio->devs[s/2].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 		struct md_rdev *rdev2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 		if (s&1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 			b = r10_bio->devs[s/2].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 			b = r10_bio->devs[s/2].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 		bio_set_dev(b, rdev2->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 			rdev2->new_data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 		b->bi_end_io = end_reshape_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 		bio_set_op_attrs(b, REQ_OP_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 		b->bi_next = blist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 		blist = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 	/* Now add as many pages as possible to all of these bios. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 
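	/*
	 * Every bio in blist shares the same resync pages: the single read
	 * fills them, and each write then pushes the same pages out to its
	 * target device.
	 */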
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 	nr_sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 		struct page *page = pages[s / (PAGE_SIZE >> 9)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 		int len = (max_sectors - s) << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 		if (len > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 			len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 		for (bio = blist; bio ; bio = bio->bi_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 			 * bio_add_page() won't fail: every bio's vec table was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 			 * sized to hold all RESYNC_PAGES pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 			bio_add_page(bio, page, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 		sector_nr += len >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 		nr_sectors += len >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 	r10_bio->sectors = nr_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 	/* Now submit the read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 	md_sync_acct_bio(read_bio, r10_bio->sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) 	atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 	read_bio->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) 	submit_bio_noacct(read_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 	sectors_done += nr_sectors;
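	/* Loop until the whole [sector_nr, last] window has been
	 * scheduled. */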
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) 	if (sector_nr <= last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 		goto read_more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 	lower_barrier(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 	/* Now that we have done the whole section, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) 	 * update reshape_progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 	if (mddev->reshape_backwards)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 		conf->reshape_progress -= sectors_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 		conf->reshape_progress += sectors_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 	return sectors_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) static void end_reshape_request(struct r10bio *r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) static int handle_reshape_read_error(struct mddev *mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 				     struct r10bio *r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 	/* Reshape read completed.  Hopefully we have a block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 	 * to write out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 	 * If we got a read error, do synchronous one-page reads from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 	 * the other copies until we find the data, or give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 	int s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 			/* Reshape has been aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 			md_done_sync(mddev, r10_bio->sectors, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) 	/* We definitely have the data in the pages, schedule the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	 * writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 	 */
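	/*
	 * remaining starts at 1 so this r10_bio cannot complete until the
	 * final end_reshape_request() below drops the initial reference,
	 * even if all of the writes finish first.
	 */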
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) 	atomic_set(&r10_bio->remaining, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 	for (s = 0; s < conf->copies*2; s++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 		struct bio *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 		int d = r10_bio->devs[s/2].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 		struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 		if (s&1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 			rdev = rcu_dereference(conf->mirrors[d].replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 			b = r10_bio->devs[s/2].repl_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 			rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 			b = r10_bio->devs[s/2].bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 		atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 		md_sync_acct_bio(b, r10_bio->sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 		atomic_inc(&r10_bio->remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 		b->bi_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 		submit_bio_noacct(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 	end_reshape_request(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) static void end_reshape(struct r10conf *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 	spin_lock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	conf->prev = conf->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	md_finish_reshape(conf->mddev);
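	/* Publish the new geometry before readers can observe
	 * reshape_progress == MaxSector. */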
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	conf->reshape_progress = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 	conf->reshape_safe = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 	spin_unlock_irq(&conf->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 	if (conf->mddev->queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 		raid10_set_io_opt(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 	conf->fullsync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) static void raid10_update_reshape_pos(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 	sector_t lo, hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 
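	/* Accept the position only if it lies inside the window the
	 * cluster is currently resyncing, or if the reshape has already
	 * finished (MaxSector); anything else indicates a bug. */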
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 	    || mddev->reshape_position == MaxSector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 		conf->reshape_progress = mddev->reshape_position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) static int handle_reshape_read_error(struct mddev *mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 				     struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 	/* Use sync reads to get the blocks from somewhere else */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 	int sectors = r10_bio->sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 	struct r10bio *r10b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 	int slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 	int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 	struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 	if (!r10b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 	/* reshape IOs share pages from .devs[0].bio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 	r10b->sector = r10_bio->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 	__raid10_find_phys(&conf->prev, r10b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) 
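	/*
	 * Try each copy in the old geometry in turn, one page at a time;
	 * if we come back around to first_slot without a successful read,
	 * the block is lost and the reshape must be aborted.
	 */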
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 	while (sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 		int s = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 		int success = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) 		int first_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 		if (s > (PAGE_SIZE >> 9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 			s = PAGE_SIZE >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 		while (!success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 			int d = r10b->devs[slot].devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 			sector_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 			if (rdev == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 			    test_bit(Faulty, &rdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 			    !test_bit(In_sync, &rdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 				goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 			addr = r10b->devs[slot].addr + idx * (PAGE_SIZE >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 			atomic_inc(&rdev->nr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 			success = sync_page_io(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 					       addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 					       s << 9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 					       pages[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 					       REQ_OP_READ, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 			rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 			if (success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 		failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 			slot++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) 			if (slot >= conf->copies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 				slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 			if (slot == first_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 		if (!success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 			/* couldn't read this block, must give up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 			set_bit(MD_RECOVERY_INTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 				&mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 			kfree(r10b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 		sectors -= s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) 		idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) 	kfree(r10b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) static void end_reshape_write(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 	struct r10bio *r10_bio = get_resync_r10bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 	struct mddev *mddev = r10_bio->mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 	int d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 	int repl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) 	struct md_rdev *rdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 
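	/* Work out which disk this write targeted and whether it went to
	 * the replacement, so the correct rdev's pending count is dropped. */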
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 	if (repl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 		rdev = conf->mirrors[d].replacement;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 	if (!rdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 		smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 		rdev = conf->mirrors[d].rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 	if (bio->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) 		/* FIXME should record badblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) 		md_error(mddev, rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 	rdev_dec_pending(rdev, mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 	end_reshape_request(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) static void end_reshape_request(struct r10bio *r10_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 	if (!atomic_dec_and_test(&r10_bio->remaining))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 	bio_put(r10_bio->master_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 	put_buf(r10_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) static void raid10_finish_reshape(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 	struct r10conf *conf = mddev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) 	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 	if (mddev->delta_disks > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) 		if (mddev->recovery_cp > mddev->resync_max_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 			mddev->recovery_cp = mddev->resync_max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 		mddev->resync_max_sectors = mddev->array_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 	} else {
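		/* Shrinking: delta_disks is negative, so this walks the
		 * disks beyond the new geometry that are being removed and
		 * marks them no longer in sync. */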
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 		int d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 		for (d = conf->geo.raid_disks ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 		     d < conf->geo.raid_disks - mddev->delta_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 		     d++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 			if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 				clear_bit(In_sync, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) 			rdev = rcu_dereference(conf->mirrors[d].replacement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 			if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 				clear_bit(In_sync, &rdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 	mddev->layout = mddev->new_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) 	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 	mddev->reshape_position = MaxSector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 	mddev->delta_disks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) 	mddev->reshape_backwards = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) static struct md_personality raid10_personality =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) 	.name		= "raid10",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) 	.level		= 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 	.make_request	= raid10_make_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) 	.run		= raid10_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 	.free		= raid10_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 	.status		= raid10_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) 	.error_handler	= raid10_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 	.hot_add_disk	= raid10_add_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 	.hot_remove_disk= raid10_remove_disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 	.spare_active	= raid10_spare_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 	.sync_request	= raid10_sync_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 	.quiesce	= raid10_quiesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 	.size		= raid10_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 	.resize		= raid10_resize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 	.takeover	= raid10_takeover,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 	.check_reshape	= raid10_check_reshape,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 	.start_reshape	= raid10_start_reshape,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 	.finish_reshape	= raid10_finish_reshape,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 	.update_reshape_pos = raid10_update_reshape_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) static int __init raid_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 	return register_md_personality(&raid10_personality);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) static void raid_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 	unregister_md_personality(&raid10_personality);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) module_init(raid_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) module_exit(raid_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) MODULE_ALIAS("md-personality-9"); /* RAID10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) MODULE_ALIAS("md-raid10");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) MODULE_ALIAS("md-level-10");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);