Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards. The listing below is drivers/md/md-bitmap.c, the MD/RAID write-intent bitmap implementation.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * end-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked will be set.  In either case, this
	 * function will abort before getting to this point again.
	 * So there is no risk of a free-spin, and so it is safe to
	 * assert that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}
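
/*
 * A rough sketch of how the hijack fallback is consumed later in this
 * file (see md_bitmap_get_counter()): when bp[page].hijacked is set,
 * the bp[page].map pointer field itself is reinterpreted as room for
 * two 16-bit counters instead of pointing at a PAGE_SIZE array of
 * them, along the lines of
 *
 *	if (bitmap->bp[page].hijacked) {
 *		int hi = (pageoff > PAGE_COUNTER_MASK);
 *		counter = &((bitmap_counter_t *)&bitmap->bp[page].map)[hi];
 *	}
 *
 * so counting degrades to two coarse counters per page instead of
 * failing outright when memory is tight.
 */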
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get allocated next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(Bitmap_sync, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, REQ_OP_READ, 0, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}
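
/*
 * The sector arithmetic above, spelled out: PAGE_SIZE/512 is the number
 * of 512-byte sectors per page, so with 4 KiB pages a page index of 2
 * at a bitmap offset of 16 sectors reads from
 *
 *	target = 16 + 2 * (4096 / 512) = 32
 *
 * and the final 'true' argument marks this as a metadata read, which
 * sync_page_io() addresses relative to rdev->sb_start rather than the
 * data area.
 */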

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}
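
/*
 * The bootstrap above is plain container_of() arithmetic:
 * list_entry(&mddev->disks, struct md_rdev, same_set) yields a fake
 * rdev whose same_set member is the list head itself, which is exactly
 * what list_for_each_entry_continue_rcu() needs in order to start from
 * the first real entry.  Callers are expected to keep calling until
 * NULL is returned, as write_sb_page() below does, so that the
 * nr_pending reference taken on each returned rdev is dropped on the
 * following call.
 */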

static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;

restart:
	rdev = NULL;
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
				       bdev_logical_block_size(bdev));
		}
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			     + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait && md_super_wait(mddev) < 0)
		goto restart;
	return 0;

 bad_alignment:
	return -EINVAL;
}
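
/*
 * The three internal-bitmap layouts guarded against above, roughly,
 * with 'offset' being the bitmap's position in sectors relative to
 * rdev->sb_start:
 *
 *	offset < 0:              [ data ...... ][ bitmap ][ metadata ]
 *	sb_start < data_offset:  [ metadata ][ bitmap ][ data ...... ]
 *	otherwise:               [ data ...... ][ metadata ][ bitmap ]
 *
 * The first two layouts must be checked so the bitmap never overlaps
 * its neighbours; the last cannot collide, and external metadata gets
 * the generic check since the bitmap could sit anywhere.
 */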

static void md_bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->storage.file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}
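
/*
 * For file-backed bitmaps, write_page() reuses the buffer_heads that
 * read_page() attached: each bh already carries the physical block
 * number obtained via bmap(), so the write goes straight to the block
 * device.  Buffers beyond the end of the data were left with
 * b_blocknr == 0 by read_page(), which is what terminates the while
 * loop above.
 */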

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	detach_page_private(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block, blk_cur;
	unsigned long blocksize = i_blocksize(inode);

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, blocksize, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_private(page, bh);
	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		block = blk_cur;

		if (count == 0)
			bh->b_blocknr = 0;
		else {
			ret = bmap(inode, &block);
			if (ret || !block) {
				ret = -EINVAL;
				bh->b_blocknr = 0;
				goto out;
			}

			bh->b_blocknr = block;
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < blocksize)
				count = 0;
			else
				count -= blocksize;

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
		}
		blk_cur++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}
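
/*
 * A worked example of the block arithmetic in read_page(), assuming
 * 4 KiB pages and a 1 KiB filesystem block size: PAGE_SHIFT -
 * i_blkbits = 12 - 10 = 2, so page index 3 starts at file block
 * 3 << 2 = 12, and the four buffers on that page cover file blocks
 * 12..15, each translated to a device block via bmap().
 */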

/*
 * bitmap file superblock operations
 */

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}

/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);
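
/*
 * Note that every superblock field is stored little-endian (hence the
 * cpu_to_le*() conversions above), and write_page() is called with
 * wait == 1, so md_bitmap_update_sb() should not return until the
 * superblock page has reached the disk or a write error has been
 * flagged.
 */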

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * md_bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of md_bitmap_read_sb.  md_bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}
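
/*
 * BITMAP_STALE is deliberately set on a freshly created superblock:
 * the bitmap does not yet reflect the state of the array, and the
 * stale flag is what later causes the on-disk contents to be treated
 * as out of date (forcing a full recovery) rather than trusted.
 */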

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = bitmap->mddev->bitmap_info.offset;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is set up */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
			   (bitmap->mddev->bitmap_info.chunksize >> 9));
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	/* Set up nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
	err = 0;

out:
	kunmap_atomic(sb);
	/* Assigning chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}

out_no_sb:
	if (test_bit(BITMAP_STALE, &bitmap->flags))
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
	if (err) {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}
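
/*
 * The re_read flow above, summarised: for a clustered bitmap the first
 * pass reads the superblock at the base offset just to learn sb->nodes,
 * then md_setup_cluster() is started, this node's slot number is
 * obtained, the per-slot offset is recomputed from bm_blocks, and the
 * superblock is read again from this node's own slot.
 */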
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736)  * general bitmap file operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740)  * on-disk bitmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * file a page at a time. There's a superblock at the start of the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  */
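/*
 * A worked example of that mapping (a sketch, assuming 4 KiB pages, so
 * PAGE_BITS == 32768, and the 256-byte bitmap_super_t sharing page 0):
 * the superblock displaces the first 256 << 3 == 2048 bits, so chunk 0
 * lands at bit 2048 of page 0, chunk 30719 at the last bit of page 0,
 * and chunk 30720 wraps to bit 0 of page 1, which is exactly what
 * file_page_index()/file_page_offset() below compute.
 */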
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) /* calculate the index of the page that contains this bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static inline unsigned long file_page_index(struct bitmap_storage *store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 					    unsigned long chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (store->sb_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		chunk += sizeof(bitmap_super_t) << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	return chunk >> PAGE_BIT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) /* calculate the (bit) offset of this bit within a page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) static inline unsigned long file_page_offset(struct bitmap_storage *store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 					     unsigned long chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (store->sb_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		chunk += sizeof(bitmap_super_t) << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	return chunk & (PAGE_BITS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  * return a pointer to the page in the filemap that contains the given bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) static inline struct page *filemap_get_page(struct bitmap_storage *store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 					    unsigned long chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (file_page_index(store, chunk) >= store->file_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	return store->filemap[file_page_index(store, chunk)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) }
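/*
 * filemap_get_page() returns NULL for a chunk beyond the mapped pages,
 * which is why md_bitmap_file_set_bit()/_clear_bit() below quietly do
 * nothing in that case and md_bitmap_file_test_bit() returns -EINVAL.
 */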
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) static int md_bitmap_storage_alloc(struct bitmap_storage *store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 				   unsigned long chunks, int with_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 				   int slot_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	int pnum, offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	unsigned long num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	unsigned long bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	bytes = DIV_ROUND_UP(chunks, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if (with_super)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		bytes += sizeof(bitmap_super_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	offset = slot_number * num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 				       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	if (!store->filemap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (with_super && !store->sb_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		if (store->sb_page == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	pnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (store->sb_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		store->filemap[0] = store->sb_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		pnum = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		store->sb_page->index = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	for ( ; pnum < num_pages; pnum++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		if (!store->filemap[pnum]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			store->file_pages = pnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		store->filemap[pnum]->index = pnum + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	store->file_pages = pnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	/* We need 4 bits per page, rounded up to a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 * of sizeof(unsigned long) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	store->filemap_attr = kzalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (!store->filemap_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	store->bytes = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) }
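/*
 * A sizing sketch for the allocator above (assuming 4 KiB pages): with
 * chunks == 100000 and with_super set, bytes = DIV_ROUND_UP(100000, 8)
 * + 256 = 12756, so num_pages = 4; the attribute array then needs
 * 4 * 4 = 16 bits, which rounds up to a single 8-byte unsigned long on
 * 64-bit.  A non-zero slot_number only offsets the page->index values,
 * giving each cluster node its own window into the shared bitmap area.
 */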
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) static void md_bitmap_file_unmap(struct bitmap_storage *store)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	struct page **map, *sb_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	int pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	file = store->file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	map = store->filemap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	pages = store->file_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	sb_page = store->sb_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	while (pages--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 			free_buffers(map[pages]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	kfree(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	kfree(store->filemap_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (sb_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		free_buffers(sb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		fput(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * bitmap_file_kick - if an error occurs while manipulating the bitmap file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * then it is no longer reliable, so we stop using it and we mark the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * as failed in the superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) static void md_bitmap_file_kick(struct bitmap *bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	char *path, *ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		md_bitmap_update_sb(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		if (bitmap->storage.file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			if (path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				ptr = file_path(bitmap->storage.file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 					     path, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			pr_warn("%s: kicking failed bitmap file %s from array!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			pr_warn("%s: disabling internal bitmap due to errors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 				bmname(bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) enum bitmap_page_attr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 				    * i.e. the counter is 1 or 2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) };
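/*
 * These attributes are packed four bits per bitmap page into
 * storage.filemap_attr, with one bit per page spare; the accessors
 * below address bit (pnum << 2) + attr, so for example page 3's
 * BITMAP_PAGE_NEEDWRITE flag is bit (3 << 2) + 2 == 14 of the array.
 */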
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) static inline void set_page_attr(struct bitmap *bitmap, int pnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 				 enum bitmap_page_attr attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 				   enum bitmap_page_attr attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) static inline int test_page_attr(struct bitmap *bitmap, int pnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 				 enum bitmap_page_attr attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 					   enum bitmap_page_attr attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return test_and_clear_bit((pnum<<2) + attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				  bitmap->storage.filemap_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  * bitmap_file_set_bit -- called before performing a write to the md device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  * to set (and eventually sync) a particular bit in the bitmap file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  * we set the bit immediately, then we record the page number so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * when an unplug occurs, we can flush the dirty pages out to disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	unsigned long bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	unsigned long chunk = block >> bitmap->counts.chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	struct bitmap_storage *store = &bitmap->storage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	unsigned long node_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if (mddev_is_clustered(bitmap->mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		node_offset = bitmap->cluster_slot * store->file_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	page = filemap_get_page(&bitmap->storage, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	bit = file_page_offset(&bitmap->storage, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	/* set the bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		set_bit(bit, kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		set_bit_le(bit, kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	pr_debug("set file bit %lu page %lu\n", bit, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	/* record page number so it gets flushed to disk when unplug occurs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) }
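/*
 * Note on the two flavours above: set_bit() numbers bits within
 * host-endian unsigned longs while set_bit_le() uses a fixed
 * little-endian layout, so non-BITMAP_HOSTENDIAN bitmaps stay portable
 * between machines of different endianness; hostendian bitmaps avoid
 * the conversion but are only valid on one endianness.
 */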
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	unsigned long bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	void *paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	unsigned long chunk = block >> bitmap->counts.chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct bitmap_storage *store = &bitmap->storage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	unsigned long node_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (mddev_is_clustered(bitmap->mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		node_offset = bitmap->cluster_slot * store->file_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	page = filemap_get_page(&bitmap->storage, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	bit = file_page_offset(&bitmap->storage, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	paddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		clear_bit(bit, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		clear_bit_le(bit, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	kunmap_atomic(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		bitmap->allclean = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	unsigned long bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	void *paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	unsigned long chunk = block >> bitmap->counts.chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	int set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	page = filemap_get_page(&bitmap->storage, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	bit = file_page_offset(&bitmap->storage, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	paddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		set = test_bit(bit, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		set = test_bit_le(bit, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	kunmap_atomic(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* this gets called when the md device is ready to unplug its underlying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * (slave) device queues -- before we let any writes go down, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * sync the dirty pages of the bitmap file to disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) void md_bitmap_unplug(struct bitmap *bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	int dirty, need_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	int writing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	if (!bitmap || !bitmap->storage.filemap ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	    test_bit(BITMAP_STALE, &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	/* look at each page to see if there are any set bits that need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * flushed out to disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	for (i = 0; i < bitmap->storage.file_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		need_write = test_and_clear_page_attr(bitmap, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 						      BITMAP_PAGE_NEEDWRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (dirty || need_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			if (!writing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				md_bitmap_wait_writes(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				if (bitmap->mddev->queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 					blk_add_trace_msg(bitmap->mddev->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 							  "md bitmap_unplug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			write_page(bitmap, bitmap->storage.filemap[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			writing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (writing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		md_bitmap_wait_writes(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		md_bitmap_file_kick(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) EXPORT_SYMBOL(md_bitmap_unplug);
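/*
 * Ordering sketch: a personality first marks bits dirty via
 * md_bitmap_startwrite(), then calls md_bitmap_unplug() to write out
 * and wait on the dirty bitmap pages, and only afterwards issues the
 * data writes themselves, so the bit covering a region reaches stable
 * storage before that region can become inconsistent.
 */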
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /* bitmap_init_from_disk -- called at bitmap_create time to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  * the in-memory bitmap from the on-disk bitmap -- also, sets up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  * memory mapping of the bitmap file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  * Special cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  *   if there's no bitmap file, or if the bitmap file had been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  *   previously kicked from the array, we mark all the bits as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  *   1's in order to cause a full resync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  * We ignore all bits for sectors that end earlier than 'start'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  * This is used when reading an out-of-date bitmap...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	unsigned long bit_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	int outofdate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	int ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	void *paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct bitmap_storage *store = &bitmap->storage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	chunks = bitmap->counts.chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	file = store->file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (!file && !bitmap->mddev->bitmap_info.offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		/* No permanent bitmap - fill with '1s'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		store->filemap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		store->file_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		for (i = 0; i < chunks ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			/* if the disk bit is set, set the memory bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 				      >= start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			md_bitmap_set_memory_bits(bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 						  (sector_t)i << bitmap->counts.chunkshift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 						  needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	if (outofdate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		pr_warn("%s: bitmap file too short %lu < %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			bmname(bitmap),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			(unsigned long) i_size_read(file->f_mapping->host),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			store->bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	oldindex = ~0L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (!bitmap->mddev->bitmap_info.external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		offset = sizeof(bitmap_super_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	if (mddev_is_clustered(bitmap->mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	for (i = 0; i < chunks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		int b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		index = file_page_index(&bitmap->storage, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		bit = file_page_offset(&bitmap->storage, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		if (index != oldindex) { /* this is a new page, read it in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			/* unmap the old page, we're done with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			if (index == store->file_pages-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 				count = store->bytes - index * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 				count = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			page = store->filemap[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			if (file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 				ret = read_page(file, index, bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 						count, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 				ret = read_sb_page(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 					bitmap->mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 					bitmap->mddev->bitmap_info.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 					page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 					index + node_offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			oldindex = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			if (outofdate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				 * if bitmap is out of date, dirty the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 				 * whole page and write it out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				paddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 				memset(paddr + offset, 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				       PAGE_SIZE - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 				kunmap_atomic(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 				write_page(bitmap, page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 				ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 				if (test_bit(BITMAP_WRITE_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 					     &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 					goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		paddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			b = test_bit(bit, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			b = test_bit_le(bit, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		kunmap_atomic(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		if (b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			/* if the disk bit is set, set the memory bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 				      >= start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			md_bitmap_set_memory_bits(bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 						  (sector_t)i << bitmap->counts.chunkshift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 						  needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			bit_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		 bmname(bitmap), store->file_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		 bit_cnt, chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	pr_warn("%s: bitmap initialisation failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		bmname(bitmap), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) void md_bitmap_write_all(struct bitmap *bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	/* We don't actually write all bitmap blocks here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	 * just flag them as needing to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (!bitmap || !bitmap->storage.filemap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	if (bitmap->storage.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		/* Only one copy, so nothing needs re-writing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	for (i = 0; i < bitmap->storage.file_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		set_page_attr(bitmap, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			      BITMAP_PAGE_NEEDWRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	bitmap->allclean = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static void md_bitmap_count_page(struct bitmap_counts *bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 				 sector_t offset, int inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	sector_t chunk = offset >> bitmap->chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	bitmap->bp[page].count += inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	md_bitmap_checkfree(bitmap, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	sector_t chunk = offset >> bitmap->chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct bitmap_page *bp = &bitmap->bp[page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (!bp->pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		bp->pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 					       sector_t offset, sector_t *blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 					       int create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)  * bitmap daemon -- periodically wakes up to clean bits and flush pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)  *			out to disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)  */
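/*
 * Counter lifecycle handled here (a sketch): a chunk's counter holds
 * 2 + the number of in-flight writes.  md_bitmap_endwrite() brings it
 * back toward 2; one daemon sweep decays an idle 2 to 1 and marks the
 * counter page pending, and the next sweep takes 1 to 0 and clears the
 * on-disk bit, so bits are cleared only after roughly two quiet
 * daemon_sleep periods.
 */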
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) void md_bitmap_daemon_work(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	struct bitmap *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	unsigned long j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	unsigned long nextpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	sector_t blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	struct bitmap_counts *counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	/* Use a mutex to guard daemon_work against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	 * bitmap_destroy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	mutex_lock(&mddev->bitmap_info.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	if (bitmap == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		mutex_unlock(&mddev->bitmap_info.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	if (time_before(jiffies, bitmap->daemon_lastrun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			+ mddev->bitmap_info.daemon_sleep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	bitmap->daemon_lastrun = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (bitmap->allclean) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	bitmap->allclean = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (bitmap->mddev->queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		blk_add_trace_msg(bitmap->mddev->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				  "md bitmap_daemon_work");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	/* Any file-page which is PENDING now needs to be written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	 * So set NEEDWRITE now, then after we make any last-minute changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	 * we will write it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	for (j = 0; j < bitmap->storage.file_pages; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		if (test_and_clear_page_attr(bitmap, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 					     BITMAP_PAGE_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			set_page_attr(bitmap, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 				      BITMAP_PAGE_NEEDWRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (bitmap->need_sync &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	    mddev->bitmap_info.external == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		/* Arrange for superblock update as well as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		 * other changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		bitmap_super_t *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		bitmap->need_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		if (bitmap->storage.filemap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			sb = kmap_atomic(bitmap->storage.sb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			sb->events_cleared =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				cpu_to_le64(bitmap->events_cleared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			kunmap_atomic(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			set_page_attr(bitmap, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				      BITMAP_PAGE_NEEDWRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	/* Now look at the bitmap counters and if any are '2' or '1',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	 * decrement and handle accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	counts = &bitmap->counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	spin_lock_irq(&counts->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	nextpage = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	for (j = 0; j < counts->chunks; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		bitmap_counter_t *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		sector_t  block = (sector_t)j << counts->chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		if (j == nextpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			nextpage += PAGE_COUNTER_RATIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				j |= PAGE_COUNTER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		if (!bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 			j |= PAGE_COUNTER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		if (*bmc == 1 && !bitmap->need_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			/* We can clear the bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			*bmc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			md_bitmap_count_page(counts, block, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			md_bitmap_file_clear_bit(bitmap, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		} else if (*bmc && *bmc <= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			*bmc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			md_bitmap_set_pending(counts, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			bitmap->allclean = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	spin_unlock_irq(&counts->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	md_bitmap_wait_writes(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	 * DIRTY pages need to be written by bitmap_unplug so it can wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	 * for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	 * If we find any DIRTY page we stop there and let bitmap_unplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	 * handle all the rest.  This is important in the case where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * the first page holds the superblock and it has been updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	 * We mustn't write any other blocks before the superblock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	for (j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	     j < bitmap->storage.file_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		     && !test_bit(BITMAP_STALE, &bitmap->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	     j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		if (test_page_attr(bitmap, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				   BITMAP_PAGE_DIRTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			/* bitmap_unplug will handle the rest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		if (bitmap->storage.filemap &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		    test_and_clear_page_attr(bitmap, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 					     BITMAP_PAGE_NEEDWRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			write_page(bitmap, bitmap->storage.filemap[j], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (bitmap->allclean == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		mddev->thread->timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			mddev->bitmap_info.daemon_sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	mutex_unlock(&mddev->bitmap_info.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 					       sector_t offset, sector_t *blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 					       int create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) __releases(bitmap->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) __acquires(bitmap->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	/* If 'create', we might release the lock and reclaim it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 * The lock must have been taken with interrupts enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 * If !create, we don't release the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	sector_t chunk = offset >> bitmap->chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	sector_t csize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	err = md_bitmap_checkpage(bitmap, page, create, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (bitmap->bp[page].hijacked ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	    bitmap->bp[page].map == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		csize = ((sector_t)1) << (bitmap->chunkshift +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 					  PAGE_COUNTER_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		csize = ((sector_t)1) << bitmap->chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	*blocks = csize - (offset & (csize - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	/* now locked ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		/* should we use the first or second counter field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		 * of the hijacked pointer? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		int hi = (pageoff > PAGE_COUNTER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		return  &((bitmap_counter_t *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			  &bitmap->bp[page].map)[hi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	} else /* page is allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		return (bitmap_counter_t *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 			&(bitmap->bp[page].map[pageoff]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
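/*
 * Contract note: on return *blocks holds the number of sectors,
 * starting at 'offset', that share the counter found, so callers can
 * walk a range counter by counter (the 'offset += blocks' loops
 * below); when the counter page is unallocated or hijacked, a single
 * counter stands in for a whole page's worth of chunks and csize, and
 * therefore *blocks, is correspondingly larger.
 */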
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (behind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		int bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		atomic_inc(&bitmap->behind_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		bw = atomic_read(&bitmap->behind_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		if (bw > bitmap->behind_writes_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			bitmap->behind_writes_used = bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		pr_debug("inc write-behind count %d/%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			 bw, bitmap->mddev->bitmap_info.max_write_behind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	while (sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		sector_t blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		bitmap_counter_t *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		spin_lock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		if (!bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			spin_unlock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			DEFINE_WAIT(__wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 			/* note that it is safe to do the prepare_to_wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			 * after the test as long as we do it before dropping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			 * the spinlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 					TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 			spin_unlock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 			finish_wait(&bitmap->overflow_wait, &__wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		switch (*bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			md_bitmap_file_set_bit(bitmap, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			md_bitmap_count_page(&bitmap->counts, offset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 			*bmc = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		(*bmc)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		spin_unlock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		offset += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		if (sectors > blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			sectors -= blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) EXPORT_SYMBOL(md_bitmap_startwrite);
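/*
 * Typical use, sketched from how RAID personalities bracket a write
 * (names like 'bio' and 'error' here are stand-ins, not code from this
 * file):
 *
 *	md_bitmap_startwrite(mddev->bitmap, bio->bi_iter.bi_sector,
 *			     bio_sectors(bio), 0);
 *	... submit the write to the member devices ...
 *	md_bitmap_endwrite(mddev->bitmap, bio->bi_iter.bi_sector,
 *			   bio_sectors(bio), !error, 0);
 *
 * with a non-zero final argument when the write is a write-behind one.
 */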
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 			unsigned long sectors, int success, int behind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	if (behind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		if (atomic_dec_and_test(&bitmap->behind_writes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			wake_up(&bitmap->behind_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		pr_debug("dec write-behind count %d/%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			 atomic_read(&bitmap->behind_writes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			 bitmap->mddev->bitmap_info.max_write_behind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	while (sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		sector_t blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		bitmap_counter_t *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		spin_lock_irqsave(&bitmap->counts.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		if (!bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		if (success && !bitmap->mddev->degraded &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		    bitmap->events_cleared < bitmap->mddev->events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			bitmap->events_cleared = bitmap->mddev->events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			bitmap->need_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		if (!success && !NEEDED(*bmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			*bmc |= NEEDED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		if (COUNTER(*bmc) == COUNTER_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			wake_up(&bitmap->overflow_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		(*bmc)--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		if (*bmc <= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			md_bitmap_set_pending(&bitmap->counts, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			bitmap->allclean = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		offset += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		if (sectors > blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			sectors -= blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			sectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) EXPORT_SYMBOL(md_bitmap_endwrite);
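/*
 * Note on the events_cleared update above: the first successful write
 * completion while the array is not degraded advances events_cleared to
 * the current array event count and sets need_sync, and the daemon
 * refuses to clear bits until it has pushed the updated superblock out;
 * a failed write instead sets NEEDED on the counter so the chunk gets
 * re-synced later.
 */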
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			       int degraded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	bitmap_counter_t *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		*blocks = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		return 1; /* always resync if no bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	spin_lock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	if (bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		/* locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		if (RESYNC(*bmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		else if (NEEDED(*bmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			if (!degraded) { /* don't set/clear bits if degraded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 				*bmc |= RESYNC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 				*bmc &= ~NEEDED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	spin_unlock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			 int degraded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	/* bitmap_start_sync must always report on multiples of whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	 * pages, otherwise resync (which is very PAGE_SIZE based) will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	 * get confused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	 * So call __bitmap_start_sync repeatedly (if needed) until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	 * at least PAGE_SIZE>>9 blocks are covered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	 * Return the 'or' of the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	sector_t blocks1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	*blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	while (*blocks < (PAGE_SIZE>>9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		rv |= __bitmap_start_sync(bitmap, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 					  &blocks1, degraded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		offset += blocks1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		*blocks += blocks1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) EXPORT_SYMBOL(md_bitmap_start_sync);
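
/*
 * Editor's note (worked example, assuming 4 KiB pages and 512-byte
 * sectors): PAGE_SIZE>>9 == 4096/512 == 8, so md_bitmap_start_sync()
 * always covers at least 8 blocks per call.  A caller scanning the
 * whole array uses the same pattern as md_bitmap_load() below:
 *
 *	sector_t sector = 0, blocks;
 *
 *	while (sector < mddev->resync_max_sectors) {
 *		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
 *		sector += blocks;
 *	}
 */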
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	bitmap_counter_t *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	if (bitmap == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		*blocks = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	spin_lock_irqsave(&bitmap->counts.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (bmc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	/* locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	if (RESYNC(*bmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		*bmc &= ~RESYNC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		if (!NEEDED(*bmc) && aborted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			*bmc |= NEEDED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 			if (*bmc <= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 				md_bitmap_set_pending(&bitmap->counts, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 				bitmap->allclean = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) EXPORT_SYMBOL(md_bitmap_end_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) void md_bitmap_close_sync(struct bitmap *bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	/* Sync has finished, and any bitmap chunks that weren't synced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	 * properly have been aborted.  It remains for us to clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	 * RESYNC bit wherever it is still set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	sector_t sector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	sector_t blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	while (sector < bitmap->mddev->resync_max_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		sector += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) EXPORT_SYMBOL(md_bitmap_close_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	sector_t s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	sector_t blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	if (sector == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		bitmap->last_end_sync = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	if (!force && time_before(jiffies, (bitmap->last_end_sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 				  + bitmap->mddev->bitmap_info.daemon_sleep)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	wait_event(bitmap->mddev->recovery_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	bitmap->mddev->curr_resync_completed = sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		md_bitmap_end_sync(bitmap, s, &blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		s += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	bitmap->last_end_sync = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) EXPORT_SYMBOL(md_bitmap_cond_end_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) void md_bitmap_sync_with_cluster(struct mddev *mddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 			      sector_t old_lo, sector_t old_hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			      sector_t new_lo, sector_t new_hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	sector_t sector, blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	for (sector = old_lo; sector < new_lo; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		sector += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	for (sector = old_hi; sector < new_hi; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		sector += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
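
/*
 * Editor's note: the two loops above slide the cluster resync window
 * forward: chunks from old_lo up to new_lo are marked in-sync via
 * md_bitmap_end_sync(), while chunks from old_hi up to new_hi are
 * claimed for resync via md_bitmap_start_sync().
 */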
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	/* For each chunk covered by any of these sectors, set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	 * counter to 2 and possibly set resync_needed.  They should all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	 * be 0 at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	sector_t secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	bitmap_counter_t *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	spin_lock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	if (!bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		spin_unlock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (!*bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		*bmc = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		md_bitmap_count_page(&bitmap->counts, offset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		md_bitmap_set_pending(&bitmap->counts, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		bitmap->allclean = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	if (needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		*bmc |= NEEDED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	spin_unlock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /* dirty the memory and file bits for bitmap chunks "s" to "e" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	unsigned long chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	for (chunk = s; chunk <= e; chunk++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		md_bitmap_set_memory_bits(bitmap, sec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		md_bitmap_file_set_bit(bitmap, sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		if (sec < bitmap->mddev->recovery_cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			/* We are asserting that the array is dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			 * so move the recovery_cp address back so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			 * that it is obvious that it is dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			bitmap->mddev->recovery_cp = sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)  * flush out any pending updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) void md_bitmap_flush(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	long sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	if (!bitmap) /* there was no bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	/* Run daemon_work three times to ensure that everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	 * which can be flushed has been flushed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	sleep = mddev->bitmap_info.daemon_sleep * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	bitmap->daemon_lastrun -= sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	md_bitmap_daemon_work(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	bitmap->daemon_lastrun -= sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	md_bitmap_daemon_work(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	bitmap->daemon_lastrun -= sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	md_bitmap_daemon_work(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	if (mddev->bitmap_info.external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		md_super_wait(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	md_bitmap_update_sb(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
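
/*
 * Editor's note (an assumption based on md_bitmap_daemon_work(), which
 * is defined earlier in this file): the daemon bails out unless
 * daemon_lastrun is at least daemon_sleep jiffies in the past, so each
 * rewind above forces an immediate pass.  Three passes are needed
 * because state advances one step per pass: counters are decayed, then
 * pages go PENDING -> NEEDWRITE -> written out.
 */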
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  * free memory that was allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) void md_bitmap_free(struct bitmap *bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	unsigned long k, pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	struct bitmap_page *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if (!bitmap) /* there was no bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (bitmap->sysfs_can_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		sysfs_put(bitmap->sysfs_can_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		md_cluster_stop(bitmap->mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	/* Shouldn't be needed - but just in case.... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	wait_event(bitmap->write_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		   atomic_read(&bitmap->pending_writes) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	/* release the bitmap file  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	md_bitmap_file_unmap(&bitmap->storage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	bp = bitmap->counts.bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	pages = bitmap->counts.pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	/* free all allocated memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (bp) /* deallocate the page memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		for (k = 0; k < pages; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			if (bp[k].map && !bp[k].hijacked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 				kfree(bp[k].map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	kfree(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	kfree(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) EXPORT_SYMBOL(md_bitmap_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) void md_bitmap_wait_behind_writes(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	/* wait for behind writes to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			 mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		/* need to kick something here to make sure I/O goes? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		wait_event(bitmap->behind_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			   atomic_read(&bitmap->behind_writes) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) void md_bitmap_destroy(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (!bitmap) /* there was no bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	md_bitmap_wait_behind_writes(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (!mddev->serialize_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		mddev_destroy_serial_pool(mddev, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	mutex_lock(&mddev->bitmap_info.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	spin_lock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	mddev->bitmap = NULL; /* disconnect from the md device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	spin_unlock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	mutex_unlock(&mddev->bitmap_info.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	if (mddev->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  * initialize the bitmap structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  * if this returns an error, bitmap_destroy must be called to clean up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  * once mddev->bitmap is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	struct bitmap *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	sector_t blocks = mddev->resync_max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	struct file *file = mddev->bitmap_info.file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	struct kernfs_node *bm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	BUG_ON(file && mddev->bitmap_info.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 			  mdname(mddev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	spin_lock_init(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	atomic_set(&bitmap->pending_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	init_waitqueue_head(&bitmap->write_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	init_waitqueue_head(&bitmap->overflow_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	init_waitqueue_head(&bitmap->behind_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	bitmap->mddev = mddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	bitmap->cluster_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	if (mddev->kobj.sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	if (bm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		sysfs_put(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		bitmap->sysfs_can_clear = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	bitmap->storage.file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	if (file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		get_file(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		/* As future accesses to this file will use bmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		 * and bypass the page cache, we must sync the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		 * first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		vfs_fsync(file, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (!mddev->bitmap_info.external) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		 * instructing us to create a new on-disk bitmap instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			err = md_bitmap_new_disk_sb(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			err = md_bitmap_read_sb(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		if (mddev->bitmap_info.chunksize == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		    mddev->bitmap_info.daemon_sleep == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			/* chunksize and daemon_sleep need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			 * to be set first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	bitmap->daemon_lastrun = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	pr_debug("created bitmap (%lu pages) for device %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		 bitmap->counts.pages, bmname(bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	return bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)  error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
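
/*
 * Editor's note: a minimal sketch of the expected caller pattern
 * (modelled on md.c; hypothetical helper, not part of this file):
 */
#if 0	/* illustrative only */
static int example_enable_bitmap(struct mddev *mddev)
{
	struct bitmap *bitmap = md_bitmap_create(mddev, -1);
	int err;

	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	/* mddev->bitmap must be set before md_bitmap_destroy() is used */
	mddev->bitmap = bitmap;
	err = md_bitmap_load(mddev);
	if (err)
		md_bitmap_destroy(mddev);
	return err;
}
#endif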
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) int md_bitmap_load(struct mddev *mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	sector_t start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	sector_t sector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	struct bitmap *bitmap = mddev->bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	rdev_for_each(rdev, mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		mddev_create_serial_pool(mddev, rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	if (mddev_is_clustered(mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	/* Clear out old bitmap info first:  Either there is none, or we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	 * are resuming after someone else has possibly changed things,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	 * so we should forget old cached info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	 * All chunks should be clean, but some might need_sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	while (sector < mddev->resync_max_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		sector_t blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		sector += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	md_bitmap_close_sync(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	if (mddev->degraded == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	    || bitmap->events_cleared == mddev->events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		/* no need to keep dirty bits to optimise a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		 * re-add of a missing device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		start = mddev->recovery_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	mutex_lock(&mddev->bitmap_info.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	err = md_bitmap_init_from_disk(bitmap, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	mutex_unlock(&mddev->bitmap_info.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	clear_bit(BITMAP_STALE, &bitmap->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	/* Kick recovery in case any bits were set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	md_bitmap_update_sb(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) EXPORT_SYMBOL_GPL(md_bitmap_load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /* the caller needs to free the returned bitmap with md_bitmap_free() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	struct bitmap *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	bitmap = md_bitmap_create(mddev, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	if (IS_ERR(bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		rv = PTR_ERR(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		return ERR_PTR(rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	rv = md_bitmap_init_from_disk(bitmap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		return ERR_PTR(rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	return bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) EXPORT_SYMBOL(get_bitmap_from_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /* Loads the bitmap associated with slot and copies the resync information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)  * to our bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		sector_t *low, sector_t *high, bool clear_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	int rv = 0, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	sector_t block, lo = 0, hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	struct bitmap_counts *counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	struct bitmap *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	bitmap = get_bitmap_from_slot(mddev, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (IS_ERR(bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	counts = &bitmap->counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	for (j = 0; j < counts->chunks; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		block = (sector_t)j << counts->chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		if (md_bitmap_file_test_bit(bitmap, block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 			if (!lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 				lo = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			hi = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			md_bitmap_file_clear_bit(bitmap, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			md_bitmap_file_set_bit(mddev->bitmap, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (clear_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		md_bitmap_update_sb(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		for (i = 0; i < bitmap->storage.file_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		md_bitmap_unplug(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	md_bitmap_unplug(mddev->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	*low = lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	*high = hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	md_bitmap_free(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	unsigned long chunk_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	struct bitmap_counts *counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	if (!bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	counts = &bitmap->counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		   "%lu%s chunk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		   counts->pages - counts->missing_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		   counts->pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		   (counts->pages - counts->missing_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		   << (PAGE_SHIFT - 10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		   chunk_kb ? "KB" : "B");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	if (bitmap->storage.file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		seq_printf(seq, ", file: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		seq_file_path(seq, bitmap->storage.file, " \t\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	seq_printf(seq, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
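
/*
 * Editor's note (worked example, assuming PAGE_SHIFT == 12 and a 64 KiB
 * chunk): with counts->pages == 10 and counts->missing_pages == 5 the
 * function above prints
 *
 *	bitmap: 5/10 pages [20KB], 64KB chunk
 *
 * because (10 - 5) << (12 - 10) == 20 and 65536 >> 10 == 64; a
 * file-backed bitmap appends ", file: <path>".
 */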
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		  int chunksize, int init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	/* If chunksize is 0, choose an appropriate chunk size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	 * Then possibly allocate new storage space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	 * Then quiesce, copy bits, replace bitmap, and re-start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	 * This function is called both to set up the initial bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	 * and to resize the bitmap while the array is active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	 * If this happens as a result of the array being resized,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	 * chunksize will be zero, and we need to choose a suitable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	 * chunksize, otherwise we use what we are given.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	struct bitmap_storage store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	struct bitmap_counts old_counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	unsigned long chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	sector_t block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	sector_t old_blocks, new_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	int chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	long pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	struct bitmap_page *new_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	if (bitmap->storage.file && !init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		pr_info("md: cannot resize file-based bitmap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	if (chunksize == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		/* If there is enough space, leave the chunk size unchanged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	 * else increase it by a factor of two until there is enough space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		long bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		long space = bitmap->mddev->bitmap_info.space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		if (space == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 			/* We don't know how much space there is, so limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 			 * to current size - in sectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 			if (!bitmap->mddev->bitmap_info.external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 				bytes += sizeof(bitmap_super_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 			space = DIV_ROUND_UP(bytes, 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 			bitmap->mddev->bitmap_info.space = space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		chunkshift = bitmap->counts.chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		chunkshift--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			/* 'chunkshift' is shift from block size to chunk size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 			chunkshift++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			bytes = DIV_ROUND_UP(chunks, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 			if (!bitmap->mddev->bitmap_info.external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 				bytes += sizeof(bitmap_super_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		} while (bytes > (space << 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
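	/*
	 * Editor's note (worked example, assuming BITMAP_BLOCK_SHIFT == 9
	 * as defined in md-bitmap.h): for chunksize == 65536 (64 KiB),
	 * ffz(~65536) == 16, so chunkshift == 16 - 9 == 7 and each chunk
	 * spans 1 << 7 == 128 blocks of 512 bytes.  The branch above
	 * instead grows chunkshift one step at a time until chunks/8
	 * bytes (plus the superblock, if internal) fit within 'space'
	 * sectors.
	 */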
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	memset(&store, 0, sizeof(store));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		ret = md_bitmap_storage_alloc(&store, chunks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 					      !bitmap->mddev->bitmap_info.external,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 					      mddev_is_clustered(bitmap->mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 					      ? bitmap->cluster_slot : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		md_bitmap_file_unmap(&store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	if (!new_bp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		md_bitmap_file_unmap(&store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	if (!init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	store.file = bitmap->storage.file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	bitmap->storage.file = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	if (store.sb_page && bitmap->storage.sb_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		memcpy(page_address(store.sb_page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		       page_address(bitmap->storage.sb_page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		       sizeof(bitmap_super_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	spin_lock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	md_bitmap_file_unmap(&bitmap->storage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	bitmap->storage = store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	old_counts = bitmap->counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	bitmap->counts.bp = new_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	bitmap->counts.pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	bitmap->counts.missing_pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	bitmap->counts.chunkshift = chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	bitmap->counts.chunks = chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 						     BITMAP_BLOCK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	blocks = min(old_counts.chunks << old_counts.chunkshift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		     chunks << chunkshift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	/* For cluster raid, need to pre-allocate bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	if (mddev_is_clustered(bitmap->mddev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		unsigned long page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		for (page = 0; page < pages; page++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 				unsigned long k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 				/* deallocate the page memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 				for (k = 0; k < page; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 					kfree(new_bp[k].map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 				kfree(new_bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 				/* restore some fields from old_counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 				bitmap->counts.bp = old_counts.bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 				bitmap->counts.pages = old_counts.pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 				bitmap->counts.missing_pages = old_counts.pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 				bitmap->counts.chunkshift = old_counts.chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 				bitmap->counts.chunks = old_counts.chunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 									     BITMAP_BLOCK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 				blocks = old_counts.chunks << old_counts.chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				bitmap->counts.bp[page].count += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	for (block = 0; block < blocks; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		bitmap_counter_t *bmc_old, *bmc_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		int set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		set = bmc_old && NEEDED(*bmc_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		if (set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 			if (*bmc_new == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 				/* need to set on-disk bits too. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 				sector_t end = block + new_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 				sector_t start = block >> chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 				start <<= chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 				while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 					md_bitmap_file_set_bit(bitmap, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 					start += 1 << chunkshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 				*bmc_new = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 				md_bitmap_count_page(&bitmap->counts, block, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 				md_bitmap_set_pending(&bitmap->counts, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			*bmc_new |= NEEDED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 			if (new_blocks < old_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 				old_blocks = new_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		block += old_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	if (bitmap->counts.bp != old_counts.bp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		unsigned long k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		for (k = 0; k < old_counts.pages; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 			if (!old_counts.bp[k].hijacked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 				kfree(old_counts.bp[k].map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		kfree(old_counts.bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	if (!init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		while (block < (chunks << chunkshift)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 			bitmap_counter_t *bmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 			if (bmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 				/* new space.  It needs to be resynced, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 				 * we set NEEDED_MASK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 				if (*bmc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 					*bmc = NEEDED_MASK | 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 					md_bitmap_count_page(&bitmap->counts, block, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 					md_bitmap_set_pending(&bitmap->counts, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 			block += new_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		for (i = 0; i < bitmap->storage.file_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	spin_unlock_irq(&bitmap->counts.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	if (!init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		md_bitmap_unplug(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) EXPORT_SYMBOL_GPL(md_bitmap_resize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) location_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	if (mddev->bitmap_info.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		len = sprintf(page, "file");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	else if (mddev->bitmap_info.offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		len = sprintf(page, "none");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	len += sprintf(page+len, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
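
/*
 * Editor's note: this pair of handlers backs the "location" attribute
 * under md/bitmap/ in sysfs.  location_show() prints "file", a signed
 * sector offset such as "+8", or "none"; per location_store() below,
 * the only change permitted while a bitmap exists is writing "none" to
 * tear it down.
 */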
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) location_store(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	rv = mddev_lock(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	if (mddev->pers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		if (!mddev->pers->quiesce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 			rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		if (mddev->recovery || mddev->sync_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 			rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	if (mddev->bitmap || mddev->bitmap_info.file ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	    mddev->bitmap_info.offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		/* bitmap already configured.  Only option is to clear it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		if (strncmp(buf, "none", 4) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 			rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		if (mddev->pers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 			mddev_suspend(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 			md_bitmap_destroy(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 			mddev_resume(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		mddev->bitmap_info.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		if (mddev->bitmap_info.file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 			struct file *f = mddev->bitmap_info.file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 			mddev->bitmap_info.file = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			fput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		/* No bitmap, OK to set a location */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		long long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		if (strncmp(buf, "none", 4) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			/* nothing to be done */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		else if (strncmp(buf, "file:", 5) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 			/* Not supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 			rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 			if (buf[0] == '+')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 				rv = kstrtoll(buf+1, 10, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 				rv = kstrtoll(buf, 10, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 			if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 			if (offset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 				rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 			if (mddev->bitmap_info.external == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 			    mddev->major_version == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 			    offset != mddev->bitmap_info.default_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 				rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 			mddev->bitmap_info.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 			if (mddev->pers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 				struct bitmap *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 				bitmap = md_bitmap_create(mddev, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 				mddev_suspend(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 				if (IS_ERR(bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 					rv = PTR_ERR(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 				else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 					mddev->bitmap = bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 					rv = md_bitmap_load(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 					if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 						mddev->bitmap_info.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 				if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 					md_bitmap_destroy(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 					mddev_resume(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 				mddev_resume(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	if (!mddev->external) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		/* Ensure new bitmap info is stored in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		 * metadata promptly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	mddev_unlock(mddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) static struct md_sysfs_entry bitmap_location =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
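/*
 * Usage sketch (illustrative; device name hypothetical): 'bitmap/location'
 * accepts "none" to remove an existing bitmap, or a signed offset in
 * 512-byte sectors relative to the superblock to create an internal one:
 *
 *	echo +8 > /sys/block/md0/md/bitmap/location
 *	cat /sys/block/md0/md/bitmap/location        -> "+8"
 *	echo none > /sys/block/md0/md/bitmap/location
 *
 * "file:..." is rejected with -EINVAL (not supported via sysfs), an offset
 * of 0 is invalid, and 0.90 metadata only accepts the default offset, all
 * as enforced by location_store() above.
 */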
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) /* 'bitmap/space' is the space available at 'location' for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)  * bitmap.  This allows the kernel to know when it is safe to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)  * resize the bitmap to match a resized array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) space_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) space_store(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	unsigned long sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	rv = kstrtoul(buf, 10, &sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	if (sectors == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	if (mddev->bitmap &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		return -EFBIG; /* Bitmap is too big for this small space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	/* could make sure it isn't too big, but that isn't really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	 * needed - user-space should be careful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	mddev->bitmap_info.space = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static struct md_sysfs_entry bitmap_space =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
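/*
 * Worked example (illustrative numbers): with an existing bitmap whose
 * storage.bytes == 6400, the minimum acceptable space is
 * (6400 + 511) >> 9 == 13 sectors, so writing 12 to 'bitmap/space' fails
 * with -EFBIG while 13 or more succeeds; 0 is always -EINVAL.
 */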
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) timeout_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	len = sprintf(page, "%lu", secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	if (jifs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	len += sprintf(page+len, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) timeout_store(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	/* timeout can be set at any time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	int rv = strict_strtoul_scaled(buf, &timeout, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	/* just to make sure we don't overflow... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	if (timeout >= LONG_MAX / HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	timeout = timeout * HZ / 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	if (timeout >= MAX_SCHEDULE_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		timeout = MAX_SCHEDULE_TIMEOUT-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	if (timeout < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	mddev->bitmap_info.daemon_sleep = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	if (mddev->thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		 * the bitmap is all clean and we don't need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		 * adjust the timeout right now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			mddev->thread->timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			md_wakeup_thread(mddev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) static struct md_sysfs_entry bitmap_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) __ATTR(timeout, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
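/*
 * Worked example (illustrative): strict_strtoul_scaled(buf, &timeout, 4)
 * parses a decimal with up to four fractional digits as value * 10^4, so
 * writing "5.5" yields timeout == 55000, and the conversion above gives
 *
 *	timeout = 55000 * HZ / 10000;	-> 5.5 * HZ jiffies
 *
 * i.e. 1375 jiffies at HZ == 250, a 5.5 second daemon sleep.
 */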
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) backlog_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) backlog_store(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	unsigned long backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	int rv = kstrtoul(buf, 10, &backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	if (backlog > COUNTER_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	mddev->bitmap_info.max_write_behind = backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	if (!backlog && mddev->serial_info_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		/* serial_info_pool is not needed if backlog is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 		if (!mddev->serialize_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 			mddev_destroy_serial_pool(mddev, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	} else if (backlog && !mddev->serial_info_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		/* serial_info_pool is needed since backlog is not zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		struct md_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		rdev_for_each(rdev, mddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 			mddev_create_serial_pool(mddev, rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	if (old_mwb != backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		md_bitmap_update_sb(mddev->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static struct md_sysfs_entry bitmap_backlog =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
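/*
 * Usage sketch (illustrative; device name hypothetical): 'bitmap/backlog'
 * bounds the number of in-flight write-behind writes, e.g.
 *
 *	echo 256 > /sys/block/md0/md/bitmap/backlog
 *
 * Writing 0 disables write-behind and, unless a serialize policy still
 * needs it, releases the serial_info_pool; values above COUNTER_MAX are
 * rejected with -EINVAL.
 */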
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) chunksize_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) chunksize_store(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	/* Can only be changed when no bitmap is active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	unsigned long csize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	if (mddev->bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	rv = kstrtoul(buf, 10, &csize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	if (csize < 512 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	    !is_power_of_2(csize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	mddev->bitmap_info.chunksize = csize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) static struct md_sysfs_entry bitmap_chunksize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
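/*
 * Usage sketch (illustrative; device name hypothetical): the chunk size
 * can only be set while no bitmap exists and must be a power of two of
 * at least 512 bytes:
 *
 *	echo 65536 > /sys/block/md0/md/bitmap/chunksize
 *
 * Writing 1000 fails with -EINVAL (not a power of two); any write while
 * mddev->bitmap is non-NULL fails with -EBUSY.
 */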
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) static ssize_t metadata_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	if (mddev_is_clustered(mddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		return sprintf(page, "clustered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	return sprintf(page, "%s\n", (mddev->bitmap_info.external
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 				      ? "external" : "internal"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	if (mddev->bitmap ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	    mddev->bitmap_info.file ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	    mddev->bitmap_info.offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	if (strncmp(buf, "external", 8) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		mddev->bitmap_info.external = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	else if ((strncmp(buf, "internal", 8) == 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 			(strncmp(buf, "clustered", 9) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		mddev->bitmap_info.external = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) static struct md_sysfs_entry bitmap_metadata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
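/*
 * Usage sketch (illustrative; device name hypothetical): before any
 * bitmap, file or offset is configured,
 *
 *	echo external > /sys/block/md0/md/bitmap/metadata
 *
 * marks the bitmap metadata as externally managed; "internal" (or
 * "clustered") clears the flag, anything else is -EINVAL, and once a
 * bitmap is configured the write fails with -EBUSY.
 */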
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) static ssize_t can_clear_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	spin_lock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	if (mddev->bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 					     "false" : "true"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		len = sprintf(page, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	spin_unlock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	if (mddev->bitmap == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	if (strncmp(buf, "false", 5) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		mddev->bitmap->need_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	else if (strncmp(buf, "true", 4) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		if (mddev->degraded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		mddev->bitmap->need_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static struct md_sysfs_entry bitmap_can_clear =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
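/*
 * Usage sketch (illustrative): 'bitmap/can_clear' reads "false" while
 * need_sync is set and "true" otherwise. Writing "true" is refused with
 * -EBUSY on a degraded array, since clearing bits there could lose track
 * of blocks that still need resync.
 */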
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) behind_writes_used_show(struct mddev *mddev, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	spin_lock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	if (mddev->bitmap == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		ret = sprintf(page, "0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		ret = sprintf(page, "%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 			      mddev->bitmap->behind_writes_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	spin_unlock(&mddev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	if (mddev->bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		mddev->bitmap->behind_writes_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) static struct md_sysfs_entry max_backlog_used =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)        behind_writes_used_show, behind_writes_used_reset);
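/*
 * Usage sketch (illustrative): reading 'bitmap/max_backlog_used' reports
 * the high-water mark of concurrent write-behind requests; writing any
 * value (the contents are ignored) resets the counter to zero.
 */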
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) static struct attribute *md_bitmap_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	&bitmap_location.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	&bitmap_space.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	&bitmap_timeout.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	&bitmap_backlog.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	&bitmap_chunksize.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	&bitmap_metadata.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	&bitmap_can_clear.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	&max_backlog_used.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) struct attribute_group md_bitmap_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	.name = "bitmap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	.attrs = md_bitmap_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) };
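/*
 * Resulting sysfs layout (illustrative; device name hypothetical): since
 * the group is named "bitmap", the attributes above appear as
 *
 *	/sys/block/md0/md/bitmap/location
 *	/sys/block/md0/md/bitmap/space
 *	/sys/block/md0/md/bitmap/timeout
 *	/sys/block/md0/md/bitmap/backlog
 *	/sys/block/md0/md/bitmap/chunksize
 *	/sys/block/md0/md/bitmap/metadata
 *	/sys/block/md0/md/bitmap/can_clear
 *	/sys/block/md0/md/bitmap/max_backlog_used
 */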