Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

All lines below are from commit 8f3ce5b39 (kx, 2023-10-28).

// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
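
/*
 * Worked example (editor's note): with the common 4 KiB PAGE_SIZE,
 * RESYNC_PAGES = (65536 + 4095) / 4096 = 16, i.e. sixteen pages back
 * each 64 KiB resync request; with 64 KiB pages it collapses to 1.
 */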

/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define	NR_RAID_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
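
/*
 * Editor's sketch (not part of the original file): IO_BLOCKED and
 * IO_MADE_GOOD are sentinel values smuggled into 'struct bio *' slots,
 * so completion paths must filter them out before dereferencing, e.g.:
 *
 *	struct bio *bio = r1_bio->bios[mirror];
 *
 *	if (BIO_SPECIAL(bio))	// NULL, IO_BLOCKED or IO_MADE_GOOD
 *		return;
 *	bio_endio(bio);
 *
 * (r1_bio, bios and mirror are illustrative names borrowed from the
 * raid1 driver, not definitions made in this file.)
 */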

/* When there are this many requests queued to be written by
 * the raid thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;
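
/*
 * Editor's sketch: the per-level write paths compare their queue of
 * pending writes against this knob and stall new writers until the
 * raid thread drains it, roughly:
 *
 *	if (conf->pending_count >= max_queued_requests) {
 *		md_wakeup_thread(mddev->thread);
 *		wait_event(conf->wait_barrier,
 *			   conf->pending_count < max_queued_requests);
 *	}
 *
 * (conf->pending_count and conf->wait_barrier are raid1/raid10 fields;
 * the snippet is a simplified paraphrase of the write path.)
 */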

/* for managing resync I/O pages */
struct resync_pages {
	void		*raid_bio;
	struct page	*pages[RESYNC_PAGES];
};

static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}
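
/*
 * Usage sketch (editor's note): this is a mempool_free_t callback; it
 * pairs with a kmalloc-based allocator when the per-level driver sets
 * up its r1bio/r10bio pool, roughly:
 *
 *	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS,
 *			   r1bio_pool_alloc, rbio_pool_free,
 *			   conf->poolinfo);
 *
 * (r1bio_pool_alloc and conf->poolinfo live in raid1.c; shown here only
 * to illustrate how the free callback is wired up.)
 */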

static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}
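
/*
 * Usage sketch (editor's note): a caller allocates the bundle and must
 * pair it with resync_free_pages() below, e.g.:
 *
 *	struct resync_pages *rp = kmalloc(sizeof(*rp), gfp_flags);
 *
 *	if (!rp)
 *		return NULL;
 *	if (resync_alloc_pages(rp, gfp_flags)) {
 *		kfree(rp);
 *		return NULL;
 *	}
 *
 * The error path unwinds with put_page(), which drops the reference
 * taken by alloc_page() and frees each page once its count hits zero.
 */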

static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}

static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;
	return rp->pages[idx];
}

/*
 * 'struct resync_pages' stores the actual pages used for doing the
 * resync IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}
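
/*
 * Editor's sketch: whoever builds a resync bio is expected to stash the
 * bundle in ->bi_private when attaching the pages, e.g.:
 *
 *	rp->raid_bio = r1_bio;
 *	bio->bi_private = rp;
 *
 * so that end_io handlers can recover both the resync_pages and, via
 * ->raid_bio, the owning raid bio. (r1_bio is an illustrative raid1
 * name.)
 */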

/* generally called after bio_reset() for resetting the bvec table */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
			       int size)
{
	int idx = 0;

	/* initialize bvec table again */
	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		/*
		 * won't fail because the vec table is big
		 * enough to hold all these pages
		 */
		bio_add_page(bio, page, len, 0);
		size -= len;
	} while (idx++ < RESYNC_PAGES && size > 0);
}
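
/*
 * Worked example (editor's note): with size = RESYNC_BLOCK_SIZE (64 KiB)
 * and 4 KiB pages, the loop runs 16 times, adding pages 0..15 with
 * len = 4096 each, until size reaches 0. A typical caller re-primes a
 * recycled bio along the lines of:
 *
 *	bio_reset(bio);
 *	bio_set_dev(bio, rdev->bdev);
 *	md_bio_reset_resync_pages(bio, rp, r1_bio->sectors << 9);
 *
 * (rdev and r1_bio are illustrative raid1 names; 'sectors << 9'
 * converts 512-byte sectors to bytes.)
 */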