// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/bitmap.h"

/*
 * Set a range of this bitmap. Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xbitmap_set(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap_range	*bmr;

	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
	if (!bmr)
		return -ENOMEM;

	INIT_LIST_HEAD(&bmr->list);
	bmr->start = start;
	bmr->len = len;
	list_add_tail(&bmr->list, &bitmap->list);

	return 0;
}

/* Free everything related to this bitmap. */
void
xbitmap_destroy(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;

	for_each_xbitmap_extent(bmr, n, bitmap) {
		list_del(&bmr->list);
		kmem_free(bmr);
	}
}

/* Initialize an empty bitmap. */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}
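
/*
 * A minimal usage sketch of the lifecycle above, assuming a caller that only
 * wants to accumulate a couple of non-overlapping ranges and then throw them
 * away again; the variable name is illustrative only:
 *
 *	struct xbitmap	dead_blocks;
 *	int		error;
 *
 *	xbitmap_init(&dead_blocks);
 *	error = xbitmap_set(&dead_blocks, 16, 4);
 *	if (!error)
 *		error = xbitmap_set(&dead_blocks, 64, 2);
 *	...use the ranges, e.g. xbitmap_hweight() now returns 6...
 *	xbitmap_destroy(&dead_blocks);
 */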

/* Compare two bitmap extents. */
static int
xbitmap_range_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xbitmap_range	*ap;
	struct xbitmap_range	*bp;

	ap = container_of(a, struct xbitmap_range, list);
	bp = container_of(b, struct xbitmap_range, list);

	if (ap->start > bp->start)
		return 1;
	if (ap->start < bp->start)
		return -1;
	return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub. This routine subtracts all the extents mentioned
 * in @sub from all the extents linked in @bitmap, which leaves @bitmap as
 * the list of blocks that are not accounted for; we assume these are the
 * dead blocks of the old metadata structure, and they can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
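
/*
 * A worked example of the subtraction: if @bitmap holds a single extent
 * starting at block 64 with length 32, and @sub holds one extent starting at
 * block 72 with length 8, then after the call @bitmap holds two extents,
 * {start 64, len 8} and {start 80, len 16}. This is the "deleting from the
 * middle" case handled below.
 */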
#define LEFT_ALIGNED	(1 << 0)
#define RIGHT_ALIGNED	(1 << 1)
int
xbitmap_disunion(
	struct xbitmap		*bitmap,
	struct xbitmap		*sub)
{
	struct list_head	*lp;
	struct xbitmap_range	*br;
	struct xbitmap_range	*new_br;
	struct xbitmap_range	*sub_br;
	uint64_t		sub_start;
	uint64_t		sub_len;
	int			state;
	int			error = 0;

	if (list_empty(&bitmap->list) || list_empty(&sub->list))
		return 0;
	ASSERT(!list_empty(&sub->list));

	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
	list_sort(NULL, &sub->list, xbitmap_range_cmp);

	/*
	 * Now that we've sorted both lists, we iterate bitmap once, rolling
	 * forward through sub and/or bitmap as necessary until we find an
	 * overlap or reach the end of either list. We do not reset lp to the
	 * head of bitmap nor do we reset sub_br to the head of sub. The list
	 * traversal is similar to the merge step of merge sort, except that
	 * we trim or delete overlapping ranges as we go. In this manner we
	 * avoid O(n^2) operations.
	 */
	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
			list);
	lp = bitmap->list.next;
	while (lp != &bitmap->list) {
		br = list_entry(lp, struct xbitmap_range, list);

		/*
		 * Advance sub_br and/or br until we find a pair that
		 * intersect or we run out of extents.
		 */
		while (sub_br->start + sub_br->len <= br->start) {
			if (list_is_last(&sub_br->list, &sub->list))
				goto out;
			sub_br = list_next_entry(sub_br, list);
		}
		if (sub_br->start >= br->start + br->len) {
			lp = lp->next;
			continue;
		}

		/* trim sub_br to fit the extent we have */
		sub_start = sub_br->start;
		sub_len = sub_br->len;
		if (sub_br->start < br->start) {
			sub_len -= br->start - sub_br->start;
			sub_start = br->start;
		}
		if (sub_len > br->len)
			sub_len = br->len;

		state = 0;
		if (sub_start == br->start)
			state |= LEFT_ALIGNED;
		if (sub_start + sub_len == br->start + br->len)
			state |= RIGHT_ALIGNED;
		switch (state) {
		case LEFT_ALIGNED:
			/* Coincides with only the left. */
			br->start += sub_len;
			br->len -= sub_len;
			break;
		case RIGHT_ALIGNED:
			/* Coincides with only the right. */
			br->len -= sub_len;
			lp = lp->next;
			break;
		case LEFT_ALIGNED | RIGHT_ALIGNED:
			/* Total overlap, just delete br. */
			lp = lp->next;
			list_del(&br->list);
			kmem_free(br);
			break;
		case 0:
			/*
			 * Deleting from the middle: add the new right extent
			 * and then shrink the left extent.
			 */
			new_br = kmem_alloc(sizeof(struct xbitmap_range),
					KM_MAYFAIL);
			if (!new_br) {
				error = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&new_br->list);
			new_br->start = sub_start + sub_len;
			new_br->len = br->start + br->len - new_br->start;
			list_add(&new_br->list, &br->list);
			br->len = sub_start - br->start;
			lp = lp->next;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

out:
	return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED
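
/*
 * A rough sketch of the reap pattern that the comment above
 * xbitmap_disunion() describes. The record_rmaps_for_owner(),
 * record_live_metadata() and reap_extents() helpers are hypothetical
 * stand-ins for the rmapbt and metadata walks; only the xbitmap_* calls are
 * functions from this file:
 *
 *	struct xbitmap	old_blocks;
 *	struct xbitmap	in_use;
 *	int		error;
 *
 *	xbitmap_init(&old_blocks);
 *	xbitmap_init(&in_use);
 *	error = record_rmaps_for_owner(&old_blocks);
 *	if (!error)
 *		error = record_live_metadata(&in_use);
 *	if (!error)
 *		error = xbitmap_disunion(&old_blocks, &in_use);
 *	if (!error)
 *		reap_extents(&old_blocks);
 *	xbitmap_destroy(&in_use);
 *	xbitmap_destroy(&old_blocks);
 */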

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree. Therefore, as we walk up the btree
 * cursor from the leaf towards the root, any level whose pointer still points
 * to the first record/key in its block is a block that we have not visited
 * before, and we need to remember that we saw it.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records. For
 * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
 * that we saw block 1. Then we observe that bc_ptrs[1] == 1, so we record
 * block 4. The list is [1, 4].
 *
 * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
 * loop. The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2. Now
 * bc_ptrs[0] == 1 again, so we record that we saw block 2. We see that
 * bc_ptrs[1] == 2, so we exit the loop. The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_ptrs[0] == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3. bc_ptrs[0] == 1, so
 * we add 3 to the list. Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor. Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xbitmap_set_btcur_path(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
		error = xbitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}

/* Collect a btree block in the bitmap. */
STATIC int
xbitmap_collect_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xbitmap		*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
	return xbitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}

/* How many bits are set in this bitmap? */
uint64_t
xbitmap_hweight(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	uint64_t		ret = 0;

	for_each_xbitmap_extent(bmr, n, bitmap)
		ret += bmr->len;

	return ret;
}