^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2000-2005 Silicon Graphics, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include "xfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "xfs_fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include "xfs_shared.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include "xfs_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "xfs_log_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "xfs_trans_resv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "xfs_bit.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "xfs_sb.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "xfs_mount.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "xfs_inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "xfs_iwalk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "xfs_quota.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "xfs_bmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "xfs_bmap_util.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "xfs_trans.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "xfs_trans_space.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "xfs_qm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "xfs_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "xfs_icache.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "xfs_error.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * The global quota manager. There is only one of these for the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * system, _not_ one per file system. XQM keeps track of the overall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * quota functionality, including maintaining the freelist and hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * tables of dquots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * We use the batch lookup interface to iterate over the dquots as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * currently is the only interface into the radix tree code that allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * fuzzy lookups instead of exact matches. Holding the lock over multiple
 * operations is fine as all callers run only during mount/umount or
 * quotaoff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define XFS_DQ_LOOKUP_BATCH 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/*
 * Walk every dquot of the given @type in @mp's quota radix tree and invoke
 * @execute(dqp, @data) on each one.
 *
 * An -EAGAIN return from @execute marks the dquot as skipped; after a full
 * pass the whole walk restarts until nothing is skipped.  The first other
 * error is remembered and returned, except that -EFSCORRUPTED aborts the
 * walk immediately.  qi_tree_lock is held across each batch, including the
 * @execute calls — see the comment above XFS_DQ_LOOKUP_BATCH.
 */
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			/* no dquots at or beyond next_index: walk is done */
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			/* resume the next batch just past this dquot's id */
			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				/* busy dquot; retry it on the next pass */
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		/* back off briefly, then rewalk to pick up skipped dquots */
		delay(1);
		goto restart;
	}

	return last_error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * Purge a dquot from all tracking data structures and free it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) */
/*
 * Purge a dquot from all tracking data structures and free it.
 *
 * xfs_qm_dquot_walk() callback: returns 0 once the dquot has been freed,
 * or -EAGAIN (so the walk retries it) when the dquot is still referenced,
 * already being freed by someone else, or its flush could not complete.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	/* Can't purge a dquot someone else is freeing or still references. */
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	/* Fence off new lookups before we start tearing the dquot down. */
	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			/* flush busy: undo FREEING and let the walk retry */
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		/*
		 * NOTE(review): retaking the flush lock here appears to wait
		 * for the flush I/O to complete before teardown — confirm
		 * against xfs_qm_dqflush()/buffer completion semantics.
		 */
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	/* Remove from the radix tree so lookups can no longer find it. */
	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * Purge the dquot cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) xfs_qm_dqpurge_all(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) uint flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (flags & XFS_QMOPT_UQUOTA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) if (flags & XFS_QMOPT_GQUOTA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (flags & XFS_QMOPT_PQUOTA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * Just destroy the quotainfo structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) xfs_qm_unmount(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (mp->m_quotainfo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) xfs_qm_destroy_quotainfo(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * Called from the vfsops layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) xfs_qm_unmount_quotas(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) xfs_mount_t *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * Release the dquots that root inode, et al might be holding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * before we flush quotas and blow away the quotainfo structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) ASSERT(mp->m_rootip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) xfs_qm_dqdetach(mp->m_rootip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) if (mp->m_rbmip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) xfs_qm_dqdetach(mp->m_rbmip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (mp->m_rsumip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) xfs_qm_dqdetach(mp->m_rsumip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * Release the quota inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) if (mp->m_quotainfo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) if (mp->m_quotainfo->qi_uquotaip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) xfs_irele(mp->m_quotainfo->qi_uquotaip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) mp->m_quotainfo->qi_uquotaip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (mp->m_quotainfo->qi_gquotaip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) xfs_irele(mp->m_quotainfo->qi_gquotaip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) mp->m_quotainfo->qi_gquotaip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) if (mp->m_quotainfo->qi_pquotaip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) xfs_irele(mp->m_quotainfo->qi_pquotaip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) mp->m_quotainfo->qi_pquotaip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) xfs_qm_dqattach_one(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) struct xfs_inode *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) bool doalloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) struct xfs_dquot **IO_idqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * See if we already have it in the inode itself. IO_idqpp is &i_udquot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * or &i_gdquot. This made the code look weird, but made the logic a lot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * simpler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) dqp = *IO_idqpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (dqp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) trace_xfs_dqattach_found(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * Find the dquot from somewhere. This bumps the reference count of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * dquot and returns it locked. This can return ENOENT if dquot didn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) * turned off suddenly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) trace_xfs_dqattach_get(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * dqget may have dropped and re-acquired the ilock, but it guarantees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * that the dquot returned is the one that should go in the inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) *IO_idqpp = dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) xfs_dqunlock(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) xfs_qm_need_dqattach(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) struct xfs_inode *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) if (!XFS_IS_QUOTA_RUNNING(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (!XFS_IS_QUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) if (!XFS_NOT_DQATTACHED(mp, ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * into account.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * If @doalloc is true, the dquot(s) will be allocated if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * Inode may get unlocked and relocked in here, and the caller must deal with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * the consequences.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) xfs_qm_dqattach_locked(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) xfs_inode_t *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) bool doalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) xfs_mount_t *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (!xfs_qm_need_dqattach(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) doalloc, &ip->i_udquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) ASSERT(ip->i_udquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) doalloc, &ip->i_gdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) ASSERT(ip->i_gdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) doalloc, &ip->i_pdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) ASSERT(ip->i_pdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * Don't worry about the dquots that we may have attached before any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * error - they'll get detached later if it has not already been done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) xfs_qm_dqattach(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) struct xfs_inode *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (!xfs_qm_need_dqattach(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) xfs_ilock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) error = xfs_qm_dqattach_locked(ip, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) xfs_iunlock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * Release dquots (and their references) if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * The inode should be locked EXCL except when this's called by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * xfs_ireclaim.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) xfs_qm_dqdetach(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) xfs_inode_t *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) trace_xfs_dquot_dqdetach(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (ip->i_udquot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) xfs_qm_dqrele(ip->i_udquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) ip->i_udquot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if (ip->i_gdquot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) xfs_qm_dqrele(ip->i_gdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) ip->i_gdquot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) if (ip->i_pdquot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) xfs_qm_dqrele(ip->i_pdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) ip->i_pdquot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
/*
 * Per-invocation state shared between the dquot shrinker and its LRU-walk
 * callback, xfs_qm_dquot_isolate().
 */
struct xfs_qm_isolate {
	struct list_head	buffers;	/* delwri queue of flushed dquot buffers */
	struct list_head	dispose;	/* isolated dquots awaiting freeing */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
/*
 * list_lru walk callback for the dquot shrinker.  Classifies @item:
 *  - busy (can't trylock) -> LRU_SKIP;
 *  - re-referenced        -> removed from the LRU, LRU_REMOVED;
 *  - dirty                -> flushed (buffer queued on isol->buffers),
 *                            left on the LRU, LRU_RETRY;
 *  - clean, unreferenced  -> moved to isol->dispose for freeing, LRU_REMOVED.
 * May drop and re-take @lru_lock around the flush, per the annotations.
 */
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	/* Only trylock: we must never sleep holding the LRU spinlock. */
	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	/* Dirty path dropped lru_lock above; re-take it before returning. */
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) xfs_qm_shrink_scan(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) struct shrinker *shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct xfs_quotainfo *qi = container_of(shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) struct xfs_quotainfo, qi_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) struct xfs_qm_isolate isol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) unsigned long freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) INIT_LIST_HEAD(&isol.buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) INIT_LIST_HEAD(&isol.dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) freed = list_lru_shrink_walk(&qi->qi_lru, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) xfs_qm_dquot_isolate, &isol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) error = xfs_buf_delwri_submit(&isol.buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) while (!list_empty(&isol.dispose)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) list_del_init(&dqp->q_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) xfs_qm_dqfree_one(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) xfs_qm_shrink_count(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) struct shrinker *shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) struct xfs_quotainfo *qi = container_of(shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) struct xfs_quotainfo, qi_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) return list_lru_shrink_count(&qi->qi_lru, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) xfs_qm_set_defquota(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct xfs_quotainfo *qinf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) struct xfs_def_quota *defq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) * Timers and warnings have been already set, let's just set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) * default limits for this quota type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) defq->blk.hard = dqp->q_blk.hardlimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) defq->blk.soft = dqp->q_blk.softlimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) defq->ino.hard = dqp->q_ino.hardlimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) defq->ino.soft = dqp->q_ino.softlimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) defq->rtb.hard = dqp->q_rtb.hardlimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) defq->rtb.soft = dqp->q_rtb.softlimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) xfs_qm_dqdestroy(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /* Initialize quota time limits from the root dquot. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) xfs_qm_init_timelimits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) xfs_dqtype_t type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) struct xfs_quotainfo *qinf = mp->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct xfs_def_quota *defq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) defq = xfs_get_defquota(qinf, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) defq->blk.time = XFS_QM_BTIMELIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) defq->ino.time = XFS_QM_ITIMELIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) defq->rtb.time = XFS_QM_RTBTIMELIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) defq->blk.warn = XFS_QM_BWARNLIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) defq->ino.warn = XFS_QM_IWARNLIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * We try to get the limits from the superuser's limits fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * This is quite hacky, but it is standard quota practice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * Since we may not have done a quotacheck by this point, just read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * the dquot without attaching it to any hashtables or lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) * The warnings and timers set the grace period given to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) * a user or group before he or she can not perform any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) * more writing. If it is zero, a default is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (dqp->q_blk.timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) defq->blk.time = dqp->q_blk.timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (dqp->q_ino.timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) defq->ino.time = dqp->q_ino.timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (dqp->q_rtb.timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) defq->rtb.time = dqp->q_rtb.timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (dqp->q_blk.warnings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) defq->blk.warn = dqp->q_blk.warnings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (dqp->q_ino.warnings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) defq->ino.warn = dqp->q_ino.warnings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (dqp->q_rtb.warnings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) defq->rtb.warn = dqp->q_rtb.warnings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) xfs_qm_dqdestroy(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * This initializes all the quota information that's kept in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * mount structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) xfs_qm_init_quotainfo(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) struct xfs_quotainfo *qinf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) ASSERT(XFS_IS_QUOTA_RUNNING(mp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) error = list_lru_init(&qinf->qi_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) goto out_free_qinf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * See if quotainodes are setup, and if not, allocate them,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * and change the superblock accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) error = xfs_qm_init_quotainos(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) goto out_free_lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) mutex_init(&qinf->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /* mutex used to serialize quotaoffs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) mutex_init(&qinf->qi_quotaofflock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /* Precalc some constants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) qinf->qi_expiry_min =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) qinf->qi_expiry_max =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) qinf->qi_expiry_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (XFS_IS_UQUOTA_RUNNING(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (XFS_IS_GQUOTA_RUNNING(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (XFS_IS_PQUOTA_RUNNING(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) error = register_shrinker(&qinf->qi_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) goto out_free_inos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) out_free_inos:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) mutex_destroy(&qinf->qi_quotaofflock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) mutex_destroy(&qinf->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) xfs_qm_destroy_quotainos(qinf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) out_free_lru:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) list_lru_destroy(&qinf->qi_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) out_free_qinf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) kmem_free(qinf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) mp->m_quotainfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * Gets called when unmounting a filesystem or when all quotas get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * turned off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * This purges the quota inodes, destroys locks and frees itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) xfs_qm_destroy_quotainfo(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct xfs_quotainfo *qi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) qi = mp->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) ASSERT(qi != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) unregister_shrinker(&qi->qi_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) list_lru_destroy(&qi->qi_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) xfs_qm_destroy_quotainos(qi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) mutex_destroy(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) mutex_destroy(&qi->qi_quotaofflock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) kmem_free(qi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) mp->m_quotainfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * Create an inode and return with a reference already taken, but unlocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * This is how we create quota inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) xfs_qm_qino_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) xfs_mount_t *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) xfs_inode_t **ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) uint flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) xfs_trans_t *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) bool need_alloc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) *ip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * With superblock that doesn't have separate pquotino, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * share an inode between gquota and pquota. If the on-disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * superblock has GQUOTA and the filesystem is now mounted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * with PQUOTA, just use sb_gquotino for sb_pquotino and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * vice-versa.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) xfs_ino_t ino = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if ((flags & XFS_QMOPT_PQUOTA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) (mp->m_sb.sb_gquotino != NULLFSINO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) ino = mp->m_sb.sb_gquotino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (XFS_IS_CORRUPT(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) mp->m_sb.sb_pquotino != NULLFSINO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) } else if ((flags & XFS_QMOPT_GQUOTA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) (mp->m_sb.sb_pquotino != NULLFSINO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) ino = mp->m_sb.sb_pquotino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (XFS_IS_CORRUPT(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) mp->m_sb.sb_gquotino != NULLFSINO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (ino != NULLFSINO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) error = xfs_iget(mp, NULL, ino, 0, 0, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) mp->m_sb.sb_gquotino = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) mp->m_sb.sb_pquotino = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) need_alloc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 0, 0, &tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (need_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) xfs_trans_cancel(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * Make the changes in the superblock, and log those too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * sbfields arg may contain fields other than *QUOTINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * VERSIONNUM for example.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) spin_lock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (flags & XFS_QMOPT_SBVERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) xfs_sb_version_addquota(&mp->m_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) mp->m_sb.sb_uquotino = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) mp->m_sb.sb_gquotino = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) mp->m_sb.sb_pquotino = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* qflags will get updated fully _after_ quotacheck */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (flags & XFS_QMOPT_UQUOTA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) mp->m_sb.sb_uquotino = (*ip)->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) else if (flags & XFS_QMOPT_GQUOTA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) mp->m_sb.sb_gquotino = (*ip)->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) mp->m_sb.sb_pquotino = (*ip)->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) spin_unlock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) xfs_log_sb(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) error = xfs_trans_commit(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ASSERT(XFS_FORCED_SHUTDOWN(mp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) xfs_alert(mp, "%s failed (error %d)!", __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (need_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) xfs_finish_inode_setup(*ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) xfs_qm_reset_dqcounts(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct xfs_buf *bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) xfs_dqid_t id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) xfs_dqtype_t type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct xfs_dqblk *dqb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) trace_xfs_reset_dqcounts(bp, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * Reset all counters and timers. They'll be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * started afresh by xfs_qm_quotacheck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sizeof(xfs_dqblk_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dqb = bp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct xfs_disk_dquot *ddq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ddq = (struct xfs_disk_dquot *)&dqb[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * Do a sanity check, and if needed, repair the dqblk. Don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * output any warnings because it's perfectly possible to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * find uninitialised dquot blks. See comment in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * xfs_dquot_verify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) xfs_dqblk_repair(mp, &dqb[j], id + j, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * Reset type in case we are reusing group quota file for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * project quotas or vice versa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ddq->d_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ddq->d_bcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ddq->d_icount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ddq->d_rtbcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * dquot id 0 stores the default grace period and the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * warning limit that were set by the administrator, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * should not reset them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (ddq->d_id != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ddq->d_btimer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ddq->d_itimer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ddq->d_rtbtimer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ddq->d_bwarns = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ddq->d_iwarns = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ddq->d_rtbwarns = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (xfs_sb_version_hasbigtime(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) ddq->d_type |= XFS_DQTYPE_BIGTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (xfs_sb_version_hascrc(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) xfs_update_cksum((char *)&dqb[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) sizeof(struct xfs_dqblk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) XFS_DQUOT_CRC_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) xfs_qm_reset_dqcounts_all(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) xfs_dqid_t firstid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) xfs_fsblock_t bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) xfs_filblks_t blkcnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct list_head *buffer_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct xfs_buf *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ASSERT(blkcnt > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * Blkcnt arg can be a very big number, and might even be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * larger than the log itself. So, we have to break it up into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * manageable-sized transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * Note that we don't start a permanent transaction here; we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * not be able to get a log reservation for the whole thing up front,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * and we don't really care to either, because we just discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * everything if we were to crash in the middle of this loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) while (blkcnt--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) XFS_FSB_TO_DADDR(mp, bno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) mp->m_quotainfo->qi_dqchunklen, 0, &bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) &xfs_dquot_buf_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * CRC and validation errors will return a EFSCORRUPTED here. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * this occurs, re-read without CRC validation so that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * repair the damage via xfs_qm_reset_dqcounts(). This process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * will leave a trace in the log indicating corruption has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * been detected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (error == -EFSCORRUPTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) XFS_FSB_TO_DADDR(mp, bno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) mp->m_quotainfo->qi_dqchunklen, 0, &bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * A corrupt buffer might not have a verifier attached, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * make sure we have the correct one attached before writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) bp->b_ops = &xfs_dquot_buf_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) xfs_buf_delwri_queue(bp, buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) xfs_buf_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* goto the next block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) bno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) firstid += mp->m_quotainfo->qi_dqperchunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 *
 * Every reset buffer is added to @buffer_list; the caller is responsible
 * for submitting (or cancelling) that delwri queue.  Returns 0, or a
 * negative errno from the extent lookup or the buffer reads, in which case
 * the scan stops at the first failing block.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,		/* the quota inode to scan */
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)	/* delwri queue for reset bufs */
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;		/* current file offset, in fsblocks */
	xfs_filblks_t		maxlblkcnt;	/* scan limit: max file size in fsblocks */
	xfs_dqid_t		firstid;	/* first dquot id of the current extent */
	xfs_fsblock_t		rablkno;	/* readahead cursor (next extent) */
	xfs_filblks_t		rablkcnt;	/* readahead length (next extent) */

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			/* quotacheck runs with no delalloc extents outstanding */
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			/* advance the scan cursor past this extent */
			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			/* id of the first dquot stored in this extent */
			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						map[i].br_startblock,
						map[i].br_blockcount,
						type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * Called by dqusage_adjust in doing a quotacheck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Given the inode, and a dquot id this updates both the incore dqout as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * as the buffer copy. This is so that once the quotacheck is done, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * just log all the buffers, as opposed to logging numerous updates to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * individual dquots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) xfs_qm_quotacheck_dqadjust(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct xfs_inode *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) xfs_qcnt_t nblks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) xfs_qcnt_t rtblks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) xfs_dqid_t id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) id = xfs_qm_id_for_quotatype(ip, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) error = xfs_qm_dqget(mp, id, type, true, &dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * Shouldn't be able to turn off quotas here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ASSERT(error != -ESRCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) ASSERT(error != -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) trace_xfs_dqadjust(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * Adjust the inode count and the block count to reflect this inode's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * resource usage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) dqp->q_ino.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) dqp->q_ino.reserved++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (nblks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dqp->q_blk.count += nblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) dqp->q_blk.reserved += nblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (rtblks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) dqp->q_rtb.count += rtblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) dqp->q_rtb.reserved += rtblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * Set default limits, adjust timers (since we changed usages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * There are no timers for the default values set in the root dquot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (dqp->q_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) xfs_qm_adjust_dqlimits(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) xfs_qm_adjust_dqtimers(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) dqp->q_flags |= XFS_DQFLAG_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) xfs_qm_dqput(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
/*
 * Callback supplied to the inode walk (xfs_iwalk_threaded; historically
 * bulkstat). Given an inumber, find its dquots and update them to account
 * for resources taken by that inode.  Returns 0 on success (or for inodes
 * that need no accounting), otherwise a negative errno.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)	/* unused walk cookie */
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;		/* non-realtime blocks */
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;	/* inode gone or unusable: nothing to account */
	if (error)
		return error;

	/* quotacheck runs before delalloc can exist on this inode */
	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

		/* make sure the data fork extent list is in-core before counting */
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
			if (error)
				goto error0;
		}

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	/* whatever isn't a realtime block counts as an ordinary data block */
	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	/* the success path falls through here too; drop the inode reference */
	xfs_irele(ip);
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
/*
 * Flush one dirty dquot to its backing buffer and queue that buffer on the
 * quotacheck delwri list.  Callback for the dquot walk in xfs_qm_quotacheck;
 * @data is the caller's buffer list.  Returns 0 on success, -EAGAIN after
 * pushing a flush-locked buffer (presumably the walk revisits the dquot —
 * confirm against xfs_qm_dquot_walk), or another negative errno on failure.
 */
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	/* skip dquots being torn down, and clean ones with nothing to write */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0);
		if (!bp) {
			error = -EINVAL;
			goto out_unlock;
		}
		/* pushbuf wants the buffer unlocked; it consumes no reference */
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	/* flush lock held: write the dquot into its backing buffer */
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 *
 * Runs at mount time. On success the *_CHKD flags are set in m_qflags;
 * on failure the quotainfo is torn down, the on-disk quota flags are
 * reset, and a negative errno is returned.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;		/* *_CHKD flags earned so far */
	LIST_HEAD		(buffer_list);	/* local delwri queue of dquot bufs */
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	/* Charge every inode's usage against the freshly zeroed dquots. */
	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	if (error)
		goto error_return;

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 * The first error wins, but we still attempt the later walks.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	/* Write back every dquot buffer collected during the reset and flush. */
	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	/* Drop any buffers still queued but never submitted. */
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
		"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * This is called from xfs_mountfs to start quotas and initialize all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * necessary data structures like quotainfo. This is also responsible for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * running a quotacheck as necessary. We are guaranteed that the superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * is consistently read in at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * If we fail here, the mount will continue with quota turned off. We don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * need to inidicate success or failure at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) xfs_qm_mount_quotas(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) uint sbf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * If quotas on realtime volumes is not supported, we disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * quotas immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (mp->m_sb.sb_rextents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) mp->m_qflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto write_changes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) ASSERT(XFS_IS_QUOTA_RUNNING(mp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * Allocate the quotainfo structure inside the mount struct, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * create quotainode(s), and change/rev superblock if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) error = xfs_qm_init_quotainfo(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * We must turn off quotas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ASSERT(mp->m_quotainfo == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) mp->m_qflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) goto write_changes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * If any of the quotas are not consistent, do a quotacheck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (XFS_QM_NEED_QUOTACHECK(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) error = xfs_qm_quotacheck(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* Quotacheck failed and disabled quotas. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * If one type of quotas is off, then it will lose its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * quotachecked status, since we won't be doing accounting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * that type anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (!XFS_IS_UQUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) mp->m_qflags &= ~XFS_UQUOTA_CHKD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (!XFS_IS_GQUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) mp->m_qflags &= ~XFS_GQUOTA_CHKD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (!XFS_IS_PQUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) mp->m_qflags &= ~XFS_PQUOTA_CHKD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) write_changes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * We actually don't have to acquire the m_sb_lock at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * This can only be called from mount, and that's single threaded. XXX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) spin_lock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) sbf = mp->m_sb.sb_qflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) spin_unlock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (xfs_sync_sb(mp, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * We could only have been turning quotas off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * We aren't in very good shape actually because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * the incore structures are convinced that quotas are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * off, but the on disk superblock doesn't know that !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) xfs_alert(mp, "%s: Superblock update failed!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) xfs_warn(mp, "Failed to initialize disk quotas.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * This is called after the superblock has been read in and we're ready to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * iget the quota inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) xfs_qm_init_quotainos(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) xfs_mount_t *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct xfs_inode *uip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct xfs_inode *gip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct xfs_inode *pip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) uint flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) ASSERT(mp->m_quotainfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * Get the uquota and gquota inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (xfs_sb_version_hasquota(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (XFS_IS_UQUOTA_ON(mp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) mp->m_sb.sb_uquotino != NULLFSINO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ASSERT(mp->m_sb.sb_uquotino > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 0, 0, &uip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (XFS_IS_GQUOTA_ON(mp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) mp->m_sb.sb_gquotino != NULLFSINO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) ASSERT(mp->m_sb.sb_gquotino > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 0, 0, &gip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) goto error_rele;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (XFS_IS_PQUOTA_ON(mp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) mp->m_sb.sb_pquotino != NULLFSINO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) ASSERT(mp->m_sb.sb_pquotino > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 0, 0, &pip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) goto error_rele;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) flags |= XFS_QMOPT_SBVERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * Create the three inodes, if they don't exist already. The changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * made above will get added to a transaction and logged in one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * the qino_alloc calls below. If the device is readonly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * temporarily switch to read-write to do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) error = xfs_qm_qino_alloc(mp, &uip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) flags | XFS_QMOPT_UQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) goto error_rele;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) flags &= ~XFS_QMOPT_SBVERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) error = xfs_qm_qino_alloc(mp, &gip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) flags | XFS_QMOPT_GQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) goto error_rele;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) flags &= ~XFS_QMOPT_SBVERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) error = xfs_qm_qino_alloc(mp, &pip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) flags | XFS_QMOPT_PQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) goto error_rele;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) mp->m_quotainfo->qi_uquotaip = uip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) mp->m_quotainfo->qi_gquotaip = gip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) mp->m_quotainfo->qi_pquotaip = pip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) error_rele:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (uip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) xfs_irele(uip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (gip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) xfs_irele(gip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (pip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) xfs_irele(pip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) xfs_qm_destroy_quotainos(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct xfs_quotainfo *qi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (qi->qi_uquotaip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) xfs_irele(qi->qi_uquotaip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) qi->qi_uquotaip = NULL; /* paranoia */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (qi->qi_gquotaip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) xfs_irele(qi->qi_gquotaip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) qi->qi_gquotaip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (qi->qi_pquotaip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) xfs_irele(qi->qi_pquotaip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) qi->qi_pquotaip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) xfs_qm_dqfree_one(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) struct xfs_dquot *dqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct xfs_mount *mp = dqp->q_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct xfs_quotainfo *qi = mp->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) mutex_lock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) qi->qi_dquots--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) mutex_unlock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) xfs_qm_dqdestroy(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* --------------- utility functions for vnodeops ---------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * Given an inode, a uid, gid and prid make sure that we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * allocated relevant dquot(s) on disk, and that we won't exceed inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * quotas by creating this file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * This also attaches dquot(s) to the given inode after locking it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * and returns the dquots corresponding to the uid and/or gid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * in : inode (unlocked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * out : udquot, gdquot with references taken and unlocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) xfs_qm_vop_dqalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct xfs_inode *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) kuid_t uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) kgid_t gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) prid_t prid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) uint flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) struct xfs_dquot **O_udqpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) struct xfs_dquot **O_gdqpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct xfs_dquot **O_pdqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) struct inode *inode = VFS_I(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct user_namespace *user_ns = inode->i_sb->s_user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct xfs_dquot *uq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct xfs_dquot *gq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct xfs_dquot *pq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) uint lockflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) lockflags = XFS_ILOCK_EXCL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) xfs_ilock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) gid = inode->i_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * Attach the dquot(s) to this inode, doing a dquot allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * if necessary. The dquot(s) will not be locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (XFS_NOT_DQATTACHED(mp, ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) error = xfs_qm_dqattach_locked(ip, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) xfs_iunlock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) ASSERT(O_udqpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (!uid_eq(inode->i_uid, uid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * What we need is the dquot that has this uid, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * if we send the inode to dqget, the uid of the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * takes priority over what's sent in the uid argument.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * We must unlock inode here before calling dqget if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * we're not sending the inode, because otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * we'll deadlock by doing trans_reserve while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * holding ilock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) xfs_iunlock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) XFS_DQTYPE_USER, true, &uq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) ASSERT(error != -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * Get the ilock in the right order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) xfs_dqunlock(uq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) lockflags = XFS_ILOCK_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) xfs_ilock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * Take an extra reference, because we'll return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * this to caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) ASSERT(ip->i_udquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) uq = xfs_qm_dqhold(ip->i_udquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) ASSERT(O_gdqpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (!gid_eq(inode->i_gid, gid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) xfs_iunlock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) XFS_DQTYPE_GROUP, true, &gq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) ASSERT(error != -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) goto error_rele;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) xfs_dqunlock(gq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) lockflags = XFS_ILOCK_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) xfs_ilock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) ASSERT(ip->i_gdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) gq = xfs_qm_dqhold(ip->i_gdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ASSERT(O_pdqpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (ip->i_d.di_projid != prid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) xfs_iunlock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) error = xfs_qm_dqget(mp, prid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) XFS_DQTYPE_PROJ, true, &pq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) ASSERT(error != -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) goto error_rele;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) xfs_dqunlock(pq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) lockflags = XFS_ILOCK_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) xfs_ilock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ASSERT(ip->i_pdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) pq = xfs_qm_dqhold(ip->i_pdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) trace_xfs_dquot_dqalloc(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) xfs_iunlock(ip, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (O_udqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) *O_udqpp = uq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) xfs_qm_dqrele(uq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (O_gdqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) *O_gdqpp = gq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) xfs_qm_dqrele(gq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (O_pdqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) *O_pdqpp = pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) xfs_qm_dqrele(pq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) error_rele:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) xfs_qm_dqrele(gq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) xfs_qm_dqrele(uq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * Actually transfer ownership, and do dquot modifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * These were already reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) struct xfs_dquot *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) xfs_qm_vop_chown(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) struct xfs_inode *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) struct xfs_dquot **IO_olddq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) struct xfs_dquot *newdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct xfs_dquot *prevdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) uint bfield = XFS_IS_REALTIME_INODE(ip) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /* old dquot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) prevdq = *IO_olddq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ASSERT(prevdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ASSERT(prevdq != newdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /* the sparkling new dquot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * Take an extra reference, because the inode is going to keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * this dquot pointer even after the trans_commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) *IO_olddq = xfs_qm_dqhold(newdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) return prevdq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) xfs_qm_vop_chown_reserve(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct xfs_inode *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) struct xfs_dquot *udqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) struct xfs_dquot *gdqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) struct xfs_dquot *pdqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) uint flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) uint64_t delblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) unsigned int blkflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct xfs_dquot *udq_unres = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct xfs_dquot *gdq_unres = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) struct xfs_dquot *pdq_unres = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) struct xfs_dquot *udq_delblks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct xfs_dquot *gdq_delblks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct xfs_dquot *pdq_delblks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) ASSERT(XFS_IS_QUOTA_RUNNING(mp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) delblks = ip->i_delayed_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) blkflags = XFS_IS_REALTIME_INODE(ip) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (XFS_IS_UQUOTA_ON(mp) && udqp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) i_uid_read(VFS_I(ip)) != udqp->q_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) udq_delblks = udqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * If there are delayed allocation blocks, then we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * unreserve those from the old dquot, and add them to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * new dquot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (delblks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) ASSERT(ip->i_udquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) udq_unres = ip->i_udquot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) i_gid_read(VFS_I(ip)) != gdqp->q_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) gdq_delblks = gdqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (delblks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) ASSERT(ip->i_gdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) gdq_unres = ip->i_gdquot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) ip->i_d.di_projid != pdqp->q_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) pdq_delblks = pdqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (delblks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) ASSERT(ip->i_pdquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) pdq_unres = ip->i_pdquot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) udq_delblks, gdq_delblks, pdq_delblks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) ip->i_d.di_nblocks, 1, flags | blkflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * Do the delayed blks reservations/unreservations now. Since, these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * are done without the help of a transaction, if a reservation fails
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * its previous reservations won't be automatically undone by trans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * code. So, we have to do it manually here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (delblks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * Do the reservations first. Unreservation can't fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) ASSERT(udq_unres || gdq_unres || pdq_unres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) udq_delblks, gdq_delblks, pdq_delblks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) (xfs_qcnt_t)delblks, 0, flags | blkflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) udq_unres, gdq_unres, pdq_unres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) -((xfs_qcnt_t)delblks), 0, blkflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) xfs_qm_vop_rename_dqattach(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) struct xfs_inode **i_tab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) struct xfs_mount *mp = i_tab[0]->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) for (i = 0; (i < 4 && i_tab[i]); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) struct xfs_inode *ip = i_tab[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * Watch out for duplicate entries in the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (i == 0 || ip != i_tab[i-1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (XFS_NOT_DQATTACHED(mp, ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) error = xfs_qm_dqattach(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) xfs_qm_vop_create_dqattach(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct xfs_inode *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) struct xfs_dquot *udqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) struct xfs_dquot *gdqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct xfs_dquot *pdqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (udqp && XFS_IS_UQUOTA_ON(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) ASSERT(ip->i_udquot == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) ip->i_udquot = xfs_qm_dqhold(udqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) ASSERT(ip->i_gdquot == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) ip->i_gdquot = xfs_qm_dqhold(gdqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) ASSERT(ip->i_pdquot == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) ASSERT(ip->i_d.di_projid == pdqp->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) ip->i_pdquot = xfs_qm_dqhold(pdqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)