// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub *sc,
	struct xfs_inode *ip)
{
	int error;

	error = xchk_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		struct address_space *mapping = VFS_I(sc->ip)->i_mapping;

		inode_dio_wait(VFS_I(sc->ip));

		/*
		 * Try to flush all incore state to disk before we examine the
		 * space mappings for the data fork. Leave accumulated errors
		 * in the mapping for the writer threads to consume.
		 *
		 * On ENOSPC or EIO writeback errors, we continue into the
		 * extent mapping checks because write failures do not
		 * necessarily imply anything about the correctness of the file
		 * metadata. The metadata and the file data could be on
		 * completely separate devices; a media failure might only
		 * affect a subset of the disk, etc. We can handle delalloc
		 * extents in the scrubber, so leaving them in memory is fine.
		 */
		error = filemap_fdatawrite(mapping);
		if (!error)
			error = filemap_fdatawait_keep_errors(mapping);
		if (error && (error != -ENOSPC && error != -EIO))
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
	struct xfs_scrub *sc;
	xfs_fileoff_t lastoff;
	bool is_rt;
	bool is_shared;
	bool was_loaded;
	int whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec,
	xfs_agblock_t agbno,
	uint64_t owner,
	struct xfs_rmap_irec *rmap)
{
	xfs_fileoff_t offset;
	unsigned int rflags = 0;
	int has_rmap;
	int error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;
	if (irec->br_state == XFS_EXT_UNWRITTEN)
		rflags |= XFS_RMAP_UNWRITTEN;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec,
	xfs_agblock_t agbno)
{
	struct xfs_rmap_irec rmap;
	unsigned long long rmap_end;
	uint64_t owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
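	/*
	 * Compute the rmap's end in 64 bits so that a corrupt record with a
	 * huge block count can't wrap the 32-bit agblock space and sneak
	 * past the range check below.
	 */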
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable. CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap. Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
	    !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!!(info->whichfork == XFS_ATTR_FORK) !=
	    !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
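	/* Fork data mappings should never be marked as bmbt blocks in the rmap. */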
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_iextent_xref(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_iextent_xref(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	struct xfs_mount *mp = info->sc->mp;
	xfs_agnumber_t agno;
	xfs_agblock_t agbno;
	xfs_extlen_t len;
	int error;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
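	/*
	 * Only the data fork of a reflink file may share blocks; attr fork
	 * and non-reflink data fork extents must not be shared. CoW fork
	 * extents must be marked as refcount btree staging extents.
	 */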
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by an xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	struct xfs_mount *mp = ip->i_mount;
	xfs_fileoff_t off;

	if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
		return;

	if (!xfs_verify_dablk(mp, irec->br_startoff))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	off = irec->br_startoff + irec->br_blockcount - 1;
	if (!xfs_verify_dablk(mp, off))
		xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_iextent(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	struct xfs_mount *mp = info->sc->mp;
	xfs_filblks_t end;
	int error = 0;

	/*
	 * Check for out-of-order extents. This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	xchk_bmap_dirattr_extent(ip, info, irec);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents. We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
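	/* Catch zero-length mappings and mappings that wrap the block address space. */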
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
	     XFS_FSB_TO_AGNO(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	if (info->is_rt)
		xchk_bmap_rt_iextent_xref(ip, info, irec);
	else
		xchk_bmap_iextent_xref(ip, info, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree *bs,
	union xfs_btree_rec *rec)
{
	struct xfs_bmbt_irec irec;
	struct xfs_bmbt_irec iext_irec;
	struct xfs_iext_cursor icur;
	struct xchk_bmap_info *info = bs->private;
	struct xfs_inode *ip = bs->cur->bc_ino.ip;
	struct xfs_buf *bp = NULL;
	struct xfs_btree_block *block;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, info->whichfork);
	uint64_t owner;
	int i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
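		/*
		 * bc_ptrs[0] == 1 means the cursor is sitting on the first
		 * record of this leaf block, so we walk the path of blocks
		 * once per leaf instead of once per record.
		 */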
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/*
	 * Check that the incore extent tree contains an extent that matches
	 * this one exactly. We validate those cached bmaps later, so we don't
	 * need to check them here. If the incore extent tree was just loaded
	 * from disk by the scrubber, we assume that its contents match what's
	 * on disk (we still hold the ILOCK) and skip the equivalence check.
	 */
	if (!info->was_loaded)
		return 0;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
				&iext_irec) ||
	    irec.br_startoff != iext_irec.br_startoff ||
	    irec.br_startblock != iext_irec.br_startblock ||
	    irec.br_blockcount != iext_irec.br_blockcount ||
	    irec.br_state != iext_irec.br_state)
		xchk_fblock_set_corrupt(bs->sc, info->whichfork,
				irec.br_startoff);
	return 0;
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub *sc,
	int whichfork,
	struct xchk_bmap_info *info)
{
	struct xfs_owner_info oinfo;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	struct xfs_mount *mp = sc->mp;
	struct xfs_inode *ip = sc->ip;
	struct xfs_btree_cur *cur;
	int error;

	/* Load the incore bmap cache if it's not loaded. */
	info->was_loaded = ifp->if_flags & XFS_IFEXTENTS;
	if (!info->was_loaded) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Check the btree structure. */
	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
out:
	return error;
}

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub *sc;
	int whichfork;
	struct xfs_iext_cursor icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur *cur,
	struct xfs_rmap_irec *rec,
	void *priv)
{
	struct xfs_bmbt_irec irec;
	struct xchk_bmap_check_rmap_info *sbcri = priv;
	struct xfs_ifork *ifp;
	struct xfs_scrub *sc = sbcri->sc;
	bool have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
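	/* Skip rmaps for the fork's own bmbt blocks; they don't map file data. */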
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent records are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_ag.agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
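		/*
		 * Trim off the part of the rmap covered by this bmbt record
		 * and walk to the next record if any of the rmap remains.
		 */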
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;
	return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub *sc,
	int whichfork,
	xfs_agnumber_t agno)
{
	struct xchk_bmap_check_rmap_info sbcri;
	struct xfs_btree_cur *cur;
	struct xfs_buf *agf;
	int error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
	if (error == -ECANCELED)
		error = 0;

	xfs_btree_del_cursor(cur, error);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub *sc,
	int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	xfs_agnumber_t agno;
	bool zero_size;
	int error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */

	if (whichfork == XFS_DATA_FORK)
		zero_size = i_size_read(VFS_I(sc->ip)) == 0;
	else
		zero_size = false;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
	    (zero_size || ifp->if_nextents > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub *sc,
	int whichfork)
{
	struct xfs_bmbt_irec irec;
	struct xchk_bmap_info info = { NULL };
	struct xfs_mount *mp = sc->mp;
	struct xfs_inode *ip = sc->ip;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t endoff;
	struct xfs_iext_cursor icur;
	int error = 0;

	/* Non-existent forks can be ignored. */
	if (!ifp)
		goto out;

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			goto out;
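		/* Delalloc extents have no on-disk mapping; skip them here. */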
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_iextent(ip, &info, &irec);
		if (error)
			goto out;
	}

	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub *sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub *sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub *sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}