// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_da_format.h"
#include "xfs_reflink.h"
#include "xfs_rmap.h"
#include "xfs_bmap_util.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/*
 * Grab total control of the inode metadata. It doesn't matter here if
 * the file data is still changing; exclusive access to the metadata is
 * the goal.
 */
int
xchk_setup_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	int			error;

	/*
	 * Try to get the inode. If the verifiers fail, we try again
	 * in raw mode.
	 */
	error = xchk_get_inode(sc, ip);
	switch (error) {
	case 0:
		break;
	case -EFSCORRUPTED:
	case -EFSBADCRC:
		return xchk_trans_alloc(sc, 0);
	default:
		return error;
	}

	/* Got the inode, lock it and we're ready to go. */
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
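	/*
	 * Note the ordering: the transaction is allocated before the ILOCK
	 * is taken.  This keeps to the usual XFS rule that log reservation,
	 * which can block waiting for log space, must not be attempted
	 * while holding the ILOCK.
	 */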
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

/* Inode core */

/*
 * Validate di_extsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_extsize().
 * These functions must be kept in sync with each other.
 */
STATIC void
xchk_inode_extsize(
	struct xfs_scrub	*sc,
	struct xfs_dinode	*dip,
	xfs_ino_t		ino,
	uint16_t		mode,
	uint16_t		flags)
{
	xfs_failaddr_t		fa;

	fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		xchk_ino_set_corrupt(sc, ino);
}

/*
 * Validate di_cowextsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
 * These functions must be kept in sync with each other.
 */
STATIC void
xchk_inode_cowextsize(
	struct xfs_scrub	*sc,
	struct xfs_dinode	*dip,
	xfs_ino_t		ino,
	uint16_t		mode,
	uint16_t		flags,
	uint64_t		flags2)
{
	xfs_failaddr_t		fa;

	fa = xfs_inode_validate_cowextsize(sc->mp,
			be32_to_cpu(dip->di_cowextsize), mode, flags,
			flags2);
	if (fa)
		xchk_ino_set_corrupt(sc, ino);
}

/* Make sure the di_flags make sense for the inode. */
STATIC void
xchk_inode_flags(
	struct xfs_scrub	*sc,
	struct xfs_dinode	*dip,
	xfs_ino_t		ino,
	uint16_t		mode,
	uint16_t		flags)
{
	struct xfs_mount	*mp = sc->mp;

	/* di_flags are all taken, last bit cannot be used */
	if (flags & ~XFS_DIFLAG_ANY)
		goto bad;

	/* rt flags require rt device */
	if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		goto bad;

	/* new rt bitmap flag only valid for rbmino */
	if ((flags & XFS_DIFLAG_NEWRTBM) && ino != mp->m_sb.sb_rbmino)
		goto bad;

	/* directory-only flags */
	if ((flags & (XFS_DIFLAG_RTINHERIT |
		      XFS_DIFLAG_EXTSZINHERIT |
		      XFS_DIFLAG_PROJINHERIT |
		      XFS_DIFLAG_NOSYMLINKS)) &&
	    !S_ISDIR(mode))
		goto bad;

	/* file-only flags */
	if ((flags & (XFS_DIFLAG_REALTIME | FS_XFLAG_EXTSIZE)) &&
	    !S_ISREG(mode))
		goto bad;

	/* filestreams and rt make no sense */
	if ((flags & XFS_DIFLAG_FILESTREAM) && (flags & XFS_DIFLAG_REALTIME))
		goto bad;

	return;
bad:
	xchk_ino_set_corrupt(sc, ino);
}

/* Make sure the di_flags2 make sense for the inode. */
STATIC void
xchk_inode_flags2(
	struct xfs_scrub	*sc,
	struct xfs_dinode	*dip,
	xfs_ino_t		ino,
	uint16_t		mode,
	uint16_t		flags,
	uint64_t		flags2)
{
	struct xfs_mount	*mp = sc->mp;

	/* Unknown di_flags2 could be from a future kernel */
	if (flags2 & ~XFS_DIFLAG2_ANY)
		xchk_ino_set_warning(sc, ino);

	/* reflink flag requires reflink feature */
	if ((flags2 & XFS_DIFLAG2_REFLINK) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		goto bad;

	/* cowextsize flag is checked w.r.t. mode separately */

	/* file/dir-only flags */
	if ((flags2 & XFS_DIFLAG2_DAX) && !(S_ISREG(mode) || S_ISDIR(mode)))
		goto bad;

	/* file-only flags */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && !S_ISREG(mode))
		goto bad;

	/* realtime and reflink make no sense, currently */
	if ((flags & XFS_DIFLAG_REALTIME) && (flags2 & XFS_DIFLAG2_REFLINK))
		goto bad;

	/* dax and reflink make no sense, currently */
	if ((flags2 & XFS_DIFLAG2_DAX) && (flags2 & XFS_DIFLAG2_REFLINK))
		goto bad;

	/* no bigtime iflag without the bigtime feature */
	if (xfs_dinode_has_bigtime(dip) &&
	    !xfs_sb_version_hasbigtime(&mp->m_sb))
		goto bad;

	return;
bad:
	xchk_ino_set_corrupt(sc, ino);
}

static inline void
xchk_dinode_nsec(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip,
	const xfs_timestamp_t	ts)
{
	struct timespec64	tv;

	tv = xfs_inode_from_disk_ts(dip, ts);
	if (tv.tv_nsec < 0 || tv.tv_nsec >= NSEC_PER_SEC)
		xchk_ino_set_corrupt(sc, ino);
}

/* Scrub all the ondisk inode fields. */
STATIC void
xchk_dinode(
	struct xfs_scrub	*sc,
	struct xfs_dinode	*dip,
	xfs_ino_t		ino)
{
	struct xfs_mount	*mp = sc->mp;
	size_t			fork_recs;
	unsigned long long	isize;
	uint64_t		flags2;
	uint32_t		nextents;
	uint16_t		flags;
	uint16_t		mode;

	flags = be16_to_cpu(dip->di_flags);
	if (dip->di_version >= 3)
		flags2 = be64_to_cpu(dip->di_flags2);
	else
		flags2 = 0;

	/* di_mode */
	mode = be16_to_cpu(dip->di_mode);
	switch (mode & S_IFMT) {
	case S_IFLNK:
	case S_IFREG:
	case S_IFDIR:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		/* mode is recognized */
		break;
	default:
		xchk_ino_set_corrupt(sc, ino);
		break;
	}

	/* v1/v2 fields */
	switch (dip->di_version) {
	case 1:
		/*
		 * We autoconvert v1 inodes into v2 inodes on writeout,
		 * so just mark this inode for preening.
		 */
		xchk_ino_set_preen(sc, ino);
		break;
	case 2:
	case 3:
		if (dip->di_onlink != 0)
			xchk_ino_set_corrupt(sc, ino);

		if (dip->di_mode == 0 && sc->ip)
			xchk_ino_set_corrupt(sc, ino);

		if (dip->di_projid_hi != 0 &&
		    !xfs_sb_version_hasprojid32bit(&mp->m_sb))
			xchk_ino_set_corrupt(sc, ino);
		break;
	default:
		xchk_ino_set_corrupt(sc, ino);
		return;
	}

	/*
	 * di_uid/di_gid -- -1 isn't invalid, but there's no way that
	 * userspace could have created that.
	 */
	if (dip->di_uid == cpu_to_be32(-1U) ||
	    dip->di_gid == cpu_to_be32(-1U))
		xchk_ino_set_warning(sc, ino);

	/* di_format */
	switch (dip->di_format) {
	case XFS_DINODE_FMT_DEV:
		if (!S_ISCHR(mode) && !S_ISBLK(mode) &&
		    !S_ISFIFO(mode) && !S_ISSOCK(mode))
			xchk_ino_set_corrupt(sc, ino);
		break;
	case XFS_DINODE_FMT_LOCAL:
		if (!S_ISDIR(mode) && !S_ISLNK(mode))
			xchk_ino_set_corrupt(sc, ino);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
			xchk_ino_set_corrupt(sc, ino);
		break;
	case XFS_DINODE_FMT_BTREE:
		if (!S_ISREG(mode) && !S_ISDIR(mode))
			xchk_ino_set_corrupt(sc, ino);
		break;
	case XFS_DINODE_FMT_UUID:
	default:
		xchk_ino_set_corrupt(sc, ino);
		break;
	}

	/* di_[amc]time.nsec */
	xchk_dinode_nsec(sc, ino, dip, dip->di_atime);
	xchk_dinode_nsec(sc, ino, dip, dip->di_mtime);
	xchk_dinode_nsec(sc, ino, dip, dip->di_ctime);

	/*
	 * di_size. xfs_dinode_verify checks for things that screw up
	 * the VFS such as the upper bit being set and zero-length
	 * symlinks/directories, but we can do more here.
	 */
	isize = be64_to_cpu(dip->di_size);
	if (isize & (1ULL << 63))
		xchk_ino_set_corrupt(sc, ino);

	/* Devices, fifos, and sockets must have zero size */
	if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode) && isize != 0)
		xchk_ino_set_corrupt(sc, ino);

	/* Directories can't be larger than the data section size (32G) */
	if (S_ISDIR(mode) && (isize == 0 || isize >= XFS_DIR2_SPACE_SIZE))
		xchk_ino_set_corrupt(sc, ino);

	/* Symlinks can't be larger than SYMLINK_MAXLEN */
	if (S_ISLNK(mode) && (isize == 0 || isize >= XFS_SYMLINK_MAXLEN))
		xchk_ino_set_corrupt(sc, ino);

	/*
	 * Warn if the running kernel can't handle the kinds of offsets
	 * needed to deal with the file size. In other words, if the
	 * pagecache can't cache all the blocks in this file due to
	 * overly large offsets, flag the inode for admin review.
	 */
	if (isize >= mp->m_super->s_maxbytes)
		xchk_ino_set_warning(sc, ino);

	/* di_nblocks */
	if (flags2 & XFS_DIFLAG2_REFLINK) {
		; /* nblocks can exceed dblocks */
	} else if (flags & XFS_DIFLAG_REALTIME) {
		/*
		 * nblocks is the sum of data extents (in the rtdev),
		 * attr extents (in the datadev), and both forks' bmbt
		 * blocks (in the datadev). This clumsy check is the
		 * best we can do without cross-referencing with the
		 * inode forks.
		 */
		if (be64_to_cpu(dip->di_nblocks) >=
		    mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks)
			xchk_ino_set_corrupt(sc, ino);
	} else {
		if (be64_to_cpu(dip->di_nblocks) >= mp->m_sb.sb_dblocks)
			xchk_ino_set_corrupt(sc, ino);
	}

	xchk_inode_flags(sc, dip, ino, mode, flags);

	xchk_inode_extsize(sc, dip, ino, mode, flags);

	/* di_nextents */
	nextents = be32_to_cpu(dip->di_nextents);
	fork_recs = XFS_DFORK_DSIZE(dip, mp) / sizeof(struct xfs_bmbt_rec);
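	/*
	 * In extents format every record must fit inline in the data fork
	 * area; in btree format there must be more records than would have
	 * fit inline, or the fork would still be in extents format.
	 */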
	switch (dip->di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if (nextents > fork_recs)
			xchk_ino_set_corrupt(sc, ino);
		break;
	case XFS_DINODE_FMT_BTREE:
		if (nextents <= fork_recs)
			xchk_ino_set_corrupt(sc, ino);
		break;
	default:
		if (nextents != 0)
			xchk_ino_set_corrupt(sc, ino);
		break;
	}

	/* di_forkoff */
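	/*
	 * The attr fork must start within the on-disk inode; an inode that
	 * claims attr extents must have an attr fork; and an inode with no
	 * attr fork must use the default (extents) attr fork format.
	 */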
	if (XFS_DFORK_APTR(dip) >= (char *)dip + mp->m_sb.sb_inodesize)
		xchk_ino_set_corrupt(sc, ino);
	if (dip->di_anextents != 0 && dip->di_forkoff == 0)
		xchk_ino_set_corrupt(sc, ino);
	if (dip->di_forkoff == 0 && dip->di_aformat != XFS_DINODE_FMT_EXTENTS)
		xchk_ino_set_corrupt(sc, ino);

	/* di_aformat */
	if (dip->di_aformat != XFS_DINODE_FMT_LOCAL &&
	    dip->di_aformat != XFS_DINODE_FMT_EXTENTS &&
	    dip->di_aformat != XFS_DINODE_FMT_BTREE)
		xchk_ino_set_corrupt(sc, ino);

	/* di_anextents */
	nextents = be16_to_cpu(dip->di_anextents);
	fork_recs = XFS_DFORK_ASIZE(dip, mp) / sizeof(struct xfs_bmbt_rec);
	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_EXTENTS:
		if (nextents > fork_recs)
			xchk_ino_set_corrupt(sc, ino);
		break;
	case XFS_DINODE_FMT_BTREE:
		if (nextents <= fork_recs)
			xchk_ino_set_corrupt(sc, ino);
		break;
	default:
		if (nextents != 0)
			xchk_ino_set_corrupt(sc, ino);
	}

	if (dip->di_version >= 3) {
		xchk_dinode_nsec(sc, ino, dip, dip->di_crtime);
		xchk_inode_flags2(sc, dip, ino, mode, flags, flags2);
		xchk_inode_cowextsize(sc, dip, ino, mode, flags,
				flags2);
	}
}

/*
 * Make sure the finobt doesn't think this inode is free.
 * We don't have to check the inobt ourselves because we got the inode via
 * IGET_UNTRUSTED, which checks the inobt for us.
 */
static void
xchk_inode_xref_finobt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	struct xfs_inobt_rec_incore rec;
	xfs_agino_t		agino;
	int			has_record;
	int			error;

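	/* The finobt is optional; without a cursor there is nothing to check. */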
	if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
		return;

	agino = XFS_INO_TO_AGINO(sc->mp, ino);

	/*
	 * Try to get the finobt record covering this inode.  If there is no
	 * record at or before it, the finobt has no opinion on this inode
	 * and we're in good shape.
	 */
	error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE,
			&has_record);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
	    !has_record)
		return;

	error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
	    !has_record)
		return;

	/*
	 * Otherwise, make sure this record either doesn't cover this inode,
	 * or that it does but it's marked present.
	 */
	if (rec.ir_startino > agino ||
	    rec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
		return;

	if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))
		xchk_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
}

/* Cross reference the inode fields with the forks. */
STATIC void
xchk_inode_xref_bmap(
	struct xfs_scrub	*sc,
	struct xfs_dinode	*dip)
{
	xfs_extnum_t		nextents;
	xfs_filblks_t		count;
	xfs_filblks_t		acount;
	int			error;

	if (xchk_skip_xref(sc->sm))
		return;

	/* Walk all the extents to check nextents/naextents/nblocks. */
	error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
			&nextents, &count);
	if (!xchk_should_check_xref(sc, &error, NULL))
		return;
	if (nextents < be32_to_cpu(dip->di_nextents))
		xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);

	error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
			&nextents, &acount);
	if (!xchk_should_check_xref(sc, &error, NULL))
		return;
	if (nextents != be16_to_cpu(dip->di_anextents))
		xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);

	/* Check nblocks against the inode. */
	if (count + acount != be64_to_cpu(dip->di_nblocks))
		xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_inode_xref(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_INO_TO_AGNO(sc->mp, ino);
	agbno = XFS_INO_TO_AGBNO(sc->mp, ino);

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_inode_xref_finobt(sc, ino);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_inode_xref_bmap(sc, dip);

	xchk_ag_free(sc, &sc->sa);
}

/*
 * If the reflink iflag disagrees with a scan for shared data fork extents,
 * either flag an error (shared extents w/ no flag) or a preen (flag set w/o
 * any shared extents). We already checked for reflink iflag set on a
 * non-reflink filesystem.
 */
static void
xchk_inode_check_reflink_iflag(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	struct xfs_mount	*mp = sc->mp;
	bool			has_shared;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return;

	error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
			&has_shared);
	if (!xchk_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
			XFS_INO_TO_AGBNO(mp, ino), &error))
		return;
	if (xfs_is_reflink_inode(sc->ip) && !has_shared)
		xchk_ino_set_preen(sc, ino);
	else if (!xfs_is_reflink_inode(sc->ip) && has_shared)
		xchk_ino_set_corrupt(sc, ino);
}

/* Scrub an inode. */
int
xchk_inode(
	struct xfs_scrub	*sc)
{
	struct xfs_dinode	di;
	int			error = 0;

	/*
	 * If sc->ip is NULL, that means that the setup function called
	 * xfs_iget to look up the inode. xfs_iget returned EFSCORRUPTED
	 * or EFSBADCRC and a NULL inode, so flag the corruption error and
	 * return.
	 */
	if (!sc->ip) {
		xchk_ino_set_corrupt(sc, sc->sm->sm_ino);
		return 0;
	}

	/* Scrub the inode core. */
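	/*
	 * Since we have an in-core inode, flatten it back into on-disk form
	 * so that the same dinode checks apply to it.
	 */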
	xfs_inode_to_disk(sc->ip, &di, 0);
	xchk_dinode(sc, &di, sc->ip->i_ino);
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Look for discrepancies between file's data blocks and the reflink
	 * iflag. We already checked the iflag against the file mode when
	 * we scrubbed the dinode.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode))
		xchk_inode_check_reflink_iflag(sc, sc->ip->i_ino);

	xchk_inode_xref(sc, sc->ip->i_ino, &di);
out:
	return error;
}