// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Convert a scrub type code to a dquot type, or return 0 if error. */
static inline xfs_dqtype_t
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQTYPE_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQTYPE_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQTYPE_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	xfs_dqtype_t		dqtype;
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;
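	/*
	 * Record that we hold the quotaoff lock before taking it so that
	 * teardown knows to release it even if a later setup step fails.
	 */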
	sc->flags |= XCHK_HAS_QUOTAOFFLOCK;
	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;
	error = xchk_setup_fs(sc, ip);
	if (error)
		return error;
	sc->ip = xfs_quota_inode(sc->mp, dqtype);
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	return 0;
}

/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
	xfs_dqid_t		last_id;
};

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xfs_dquot	*dq,
	xfs_dqtype_t		dqtype,
	void			*priv)
{
	struct xchk_quota_info	*sqi = priv;
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	xfs_ino_t		fs_icount;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Except for the root dquot, each dquot we see must have a strictly
	 * higher id than the one we saw before it.
	 */
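	/*
	 * Each block of the quota file holds qi_dqperchunk dquot records, so
	 * id / qi_dqperchunk is the file block offset we report errors on.
	 */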
	offset = dq->q_id / qi->qi_dqperchunk;
	if (dq->q_id && dq->q_id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = dq->q_id;

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
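	/* Sum the per-cpu counters to get an accurate global inode count. */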
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits. However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage. However, we flag it for
	 * admin review.
	 */
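	/*
	 * Skip the usage checks for the root dquot; its limit fields hold
	 * the filesystem-wide defaults, not limits enforced against id 0.
	 */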
	if (dq->q_id == 0)
		goto out;

	if (dq->q_blk.hardlimit != 0 &&
	    dq->q_blk.count > dq->q_blk.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit != 0 &&
	    dq->q_ino.count > dq->q_ino.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit != 0 &&
	    dq->q_rtb.count > dq->q_rtb.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -EFSCORRUPTED;

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
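	/*
	 * The highest possible dquot id maps to the last file block offset
	 * that can legitimately hold dquot records.
	 */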
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;
		/*
		 * delalloc extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (isnullstartblock(irec.br_startblock) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_quota_info	sqi;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_dqtype_t		dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items. Now that we've checked the quota inode
	 * data fork we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xfs_iunlock(sc->ip, sc->ilock_flags);
	sc->ilock_flags = 0;
	sqi.sc = sc;
	sqi.last_id = 0;
	error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
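	/*
	 * Re-take the ILOCK before returning so that teardown can drop the
	 * inode lock recorded in sc->ilock_flags.
	 */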
	sc->ilock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}