^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2000-2003 Silicon Graphics, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include "xfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "xfs_fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include "xfs_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include "xfs_log_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "xfs_shared.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "xfs_trans_resv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "xfs_bit.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "xfs_mount.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "xfs_defer.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "xfs_inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "xfs_bmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "xfs_quota.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "xfs_trans.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "xfs_buf_item.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "xfs_trans_space.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "xfs_trans_priv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "xfs_qm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "xfs_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "xfs_log.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "xfs_bmap_btree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "xfs_error.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Lock order:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * ip->i_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * qi->qi_tree_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * dquot->q_qlock (xfs_dqlock() and friends)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * dquot->q_flush (xfs_dqflock() and friends)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * qi->qi_lru_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * If two dquots need to be locked the order is user before group/project,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * otherwise by the lowest id first, see xfs_dqlock2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
/* Cache for per-transaction dquot bookkeeping; non-static, used by other files. */
struct kmem_zone *xfs_qm_dqtrxzone;
/* Cache backing incore struct xfs_dquot allocations (alloc/destroy below). */
static struct kmem_zone *xfs_qm_dqzone;

/*
 * Separate lockdep classes for group and project dquot locks, so that the
 * user-before-group/project lock nesting described in the lock-order comment
 * above does not trigger false lockdep reports.
 */
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/*
 * This is called to free all the memory associated with a dquot.
 * The dquot must already have been removed from the LRU list.
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	/* Free the log item's shadow log vector buffer, if one was allocated. */
	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	/* One fewer incore dquot in the per-mount stats. */
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_qm_dqzone, dqp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * If default limits are in force, push them into the dquot now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * We overwrite the dquot limits only if they are zero and this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * is not the root dquot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) xfs_qm_adjust_dqlimits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) struct xfs_dquot *dq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct xfs_mount *mp = dq->q_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) struct xfs_quotainfo *q = mp->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct xfs_def_quota *defq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) int prealloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) ASSERT(dq->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) defq = xfs_get_defquota(q, xfs_dquot_type(dq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) if (!dq->q_blk.softlimit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) dq->q_blk.softlimit = defq->blk.soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) prealloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (!dq->q_blk.hardlimit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) dq->q_blk.hardlimit = defq->blk.hard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) prealloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) if (!dq->q_ino.softlimit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) dq->q_ino.softlimit = defq->ino.soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (!dq->q_ino.hardlimit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) dq->q_ino.hardlimit = defq->ino.hard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (!dq->q_rtb.softlimit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) dq->q_rtb.softlimit = defq->rtb.soft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (!dq->q_rtb.hardlimit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) dq->q_rtb.hardlimit = defq->rtb.hard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) if (prealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) xfs_dquot_set_prealloc_limits(dq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /* Set the expiration time of a quota's grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) time64_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) xfs_dquot_set_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) time64_t timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) struct xfs_quotainfo *qi = mp->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return clamp_t(time64_t, timeout, qi->qi_expiry_min,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) qi->qi_expiry_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /* Set the length of the default grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) time64_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) xfs_dquot_set_grace_period(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) time64_t grace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
/*
 * Determine if this quota counter is over either limit and set the quota
 * timers as appropriate.
 */
static inline void
xfs_qm_adjust_res_timer(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim)
{
	/* A nonzero soft limit must not exceed the hard limit. */
	ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);

	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		/*
		 * Over a limit: start the grace period timer if it isn't
		 * already running.  An already-running timer is left alone.
		 */
		if (res->timer == 0)
			res->timer = xfs_dquot_set_timeout(mp,
					ktime_get_real_seconds() + qlim->time);
	} else {
		/*
		 * Under both limits.  If no timer is running, clear any
		 * warning count; otherwise just stop the timer.  Note the
		 * asymmetry: warnings are not cleared on the same pass the
		 * timer is stopped, only on a subsequent under-limit pass.
		 */
		if (res->timer == 0)
			res->warnings = 0;
		else
			res->timer = 0;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * Check the limits and timers of a dquot and start or reset timers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * This gets called even when quota enforcement is OFF, which makes our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * life a little less complicated. (We just don't reject any quota
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * reservations in that case, when enforcement is off).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * enforcement's off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * In contrast, warnings are a little different in that they don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * 'automatically' get started when limits get exceeded. They do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * get reset to zero, however, when we find the count to be under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * the soft limit (they are only ever set non-zero via userspace).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) xfs_qm_adjust_dqtimers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct xfs_dquot *dq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct xfs_mount *mp = dq->q_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct xfs_quotainfo *qi = mp->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct xfs_def_quota *defq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) ASSERT(dq->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) defq = xfs_get_defquota(qi, xfs_dquot_type(dq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
/*
 * Initialize a buffer full of dquots and log the whole thing.
 *
 * @tp:   transaction the buffer is dirtied/logged under (must be non-NULL)
 * @mp:   mount the dquots belong to
 * @id:   id of any dquot inside the chunk; used to compute the first id
 * @type: quota type (user/group/project) being initialized
 * @bp:   the locked buffer covering the dquot chunk
 */
STATIC void
xfs_qm_init_dquot_blk(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_buf		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dqblk	*d;
	xfs_dqid_t		curid;
	unsigned int		qflag;
	unsigned int		blftype;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	/* Pick the quotacheck flag and buffer log format for this quota type. */
	switch (type) {
	case XFS_DQTYPE_USER:
		qflag = XFS_UQUOTA_CHKD;
		blftype = XFS_BLF_UDQUOT_BUF;
		break;
	case XFS_DQTYPE_PROJ:
		qflag = XFS_PQUOTA_CHKD;
		blftype = XFS_BLF_PDQUOT_BUF;
		break;
	case XFS_DQTYPE_GROUP:
		qflag = XFS_GQUOTA_CHKD;
		blftype = XFS_BLF_GDQUOT_BUF;
		break;
	default:
		ASSERT(0);
		return;
	}

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	/* Zero the whole chunk, then stamp each dquot record in turn. */
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_type = type;
		/*
		 * Dquot id 0 is never flagged bigtime — presumably because
		 * its timer fields hold default durations rather than
		 * timestamps; confirm against the on-disk format docs.
		 */
		if (curid > 0 && xfs_sb_version_hasbigtime(&mp->m_sb))
			d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			/* v5 filesystems: stamp the UUID and per-dquot CRC. */
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp, blftype);

	/*
	 * quotacheck uses delayed writes to update all the dquots on disk in an
	 * efficient manner instead of logging the individual dquot changes as
	 * they are made. However if we log the buffer allocated here and crash
	 * after quotacheck while the logged initialisation is still in the
	 * active region of the log, log recovery can replay the dquot buffer
	 * initialisation over the top of the checked dquots and corrupt quota
	 * accounting.
	 *
	 * To avoid this problem, quotacheck cannot log the initialised buffer.
	 * We must still dirty the buffer and write it back before the
	 * allocation transaction clears the log. Therefore, mark the buffer as
	 * ordered instead of logging it directly. This is safe for quotacheck
	 * because it detects and repairs allocated but not yet initialized
	 * dquot blocks in the quota inodes.
	 */
	if (!(mp->m_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
	dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
	if (!dqp->q_prealloc_lo_wmark) {
		/*
		 * No soft limit: derive the low watermark as 95% of the hard
		 * limit.  do_div() divides its first argument in place.
		 */
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	/* Precompute 1%, 3% and 5% of the hard limit into q_low_space[]. */
	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 *
 * @tpp: in/out transaction pointer; xfs_defer_finish() may roll it, so the
 *       caller's transaction pointer is updated through here.
 * @dqp: the incore dquot needing disk backing; q_blkno is filled in.
 * @bpp: on success, the initialized dquot buffer, locked and held to *tpp.
 *
 * Returns 0, -ESRCH if this quota type was turned off meanwhile, or a
 * negative errno from the allocation/defer machinery.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_trans	**tpp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp = *tpp;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
		/*
		 * Return if this type of quotas is turned off while we didn't
		 * have an inode lock
		 */
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	/*
	 * Create the block mapping.  Joining the inode to the transaction
	 * transfers ILOCK ownership to the transaction, so the early error
	 * return below does not unlock it explicitly -- presumably the
	 * caller cancels *tpp on error, releasing the lock; confirm.
	 */
	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		return error;
	/* We asked for one real cluster-sized extent; verify we got it. */
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		return error;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller. The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call. We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot. If the _defer_finish fails, the old
	 * transaction is gone but the new buffer is not joined or held to any
	 * transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction. The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction. On error, the buffer is
	 * released and not passed back.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_defer_finish(tpp);
	if (error) {
		/* Drop the hold and the buffer; see the big comment above. */
		xfs_trans_bhold_release(*tpp, bp);
		xfs_trans_brelse(*tpp, bp);
		return error;
	}
	*bpp = bp;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 *
 * @mp:  mount the dquot belongs to
 * @dqp: incore dquot; q_fileoffset must be set, q_blkno is filled in here
 * @bpp: on success, the verified dquot buffer, locked with an extra LRU ref
 *
 * Returns 0, -ENOENT for a hole (caller may then allocate), -ESRCH if this
 * quota type was turned off meanwhile, or a read error.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, qtype)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	/* A hole means the dquot has never been allocated on disk. */
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * store the blkno etc so that we don't have to do the
	 * mapping all the time
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* Read and verify the dquot chunk (NULL transaction: plain read). */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /* Allocate and initialize everything we need for an incore dquot. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) STATIC struct xfs_dquot *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) xfs_dquot_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) xfs_dqid_t id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) xfs_dqtype_t type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) dqp = kmem_cache_zalloc(xfs_qm_dqzone, GFP_KERNEL | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) dqp->q_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) dqp->q_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) dqp->q_mount = mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) INIT_LIST_HEAD(&dqp->q_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) mutex_init(&dqp->q_qlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) init_waitqueue_head(&dqp->q_pinwait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * Offset of dquot in the (fixed sized) dquot chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) sizeof(xfs_dqblk_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * Because we want to use a counting completion, complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * the flush completion once to allow a single access to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * the flush completion without blocking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) init_completion(&dqp->q_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) complete(&dqp->q_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * Make sure group quotas have a different lock class than user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * quotas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) case XFS_DQTYPE_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) /* uses the default lock class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) case XFS_DQTYPE_GROUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) case XFS_DQTYPE_PROJ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) xfs_qm_dquot_logitem_init(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) XFS_STATS_INC(mp, xs_qm_dquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /* Copy the in-core quota fields in from the on-disk buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) xfs_dquot_from_disk(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) struct xfs_dquot *dqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * Ensure that we got the type and ID we were looking for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * Everything else was checked by the dquot buffer verifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if ((ddqp->d_type & XFS_DQTYPE_REC_MASK) != xfs_dquot_type(dqp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) be32_to_cpu(ddqp->d_id) != dqp->q_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) "Metadata corruption detected at %pS, quota %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) __this_address, dqp->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /* copy everything from disk dquot to the incore dquot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) dqp->q_type = ddqp->d_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) dqp->q_blk.warnings = be16_to_cpu(ddqp->d_bwarns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) dqp->q_ino.warnings = be16_to_cpu(ddqp->d_iwarns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) dqp->q_rtb.warnings = be16_to_cpu(ddqp->d_rtbwarns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * Reservation counters are defined as reservation plus current usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * to avoid having to add every time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) dqp->q_blk.reserved = dqp->q_blk.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) dqp->q_ino.reserved = dqp->q_ino.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) dqp->q_rtb.reserved = dqp->q_rtb.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) /* initialize the dquot speculative prealloc thresholds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) xfs_dquot_set_prealloc_limits(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) /* Copy the in-core quota fields into the on-disk buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) xfs_dquot_to_disk(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) struct xfs_disk_dquot *ddqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct xfs_dquot *dqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) ddqp->d_version = XFS_DQUOT_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) ddqp->d_type = dqp->q_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) ddqp->d_id = cpu_to_be32(dqp->q_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) ddqp->d_pad0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) ddqp->d_pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) ddqp->d_bwarns = cpu_to_be16(dqp->q_blk.warnings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) ddqp->d_iwarns = cpu_to_be16(dqp->q_ino.warnings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ddqp->d_rtbwarns = cpu_to_be16(dqp->q_rtb.warnings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) /* Allocate and initialize the dquot buffer for this in-core dquot. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) xfs_qm_dqread_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) struct xfs_dquot *dqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) struct xfs_buf **bpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) struct xfs_trans *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) goto err_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) error = xfs_trans_commit(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * Buffer was held to the transaction, so we have to unlock it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * manually here because we're not passing it back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) xfs_buf_relse(*bpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) *bpp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) err_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) xfs_trans_cancel(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * and release the buffer immediately. If @can_alloc is true, fill any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * holes in the on-disk metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) xfs_qm_dqread(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) xfs_dqid_t id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) bool can_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) struct xfs_dquot **dqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct xfs_buf *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) dqp = xfs_dquot_alloc(mp, id, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) trace_xfs_dqread(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) /* Try to read the buffer, allocating if necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) error = xfs_dquot_disk_read(mp, dqp, &bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (error == -ENOENT && can_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) error = xfs_qm_dqread_alloc(mp, dqp, &bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * At this point we should have a clean locked buffer. Copy the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * to the incore dquot and release the buffer since the incore dquot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * has its own locking protocol so we needn't tie up the buffer any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * further.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ASSERT(xfs_buf_islocked(bp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) error = xfs_dquot_from_disk(dqp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) xfs_buf_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) *dqpp = dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) trace_xfs_dqread_fail(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) xfs_qm_dqdestroy(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) *dqpp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * Advance to the next id in the current chunk, or if at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * end of the chunk, skip ahead to first id in next allocated chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * using the SEEK_DATA interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) xfs_dq_get_next_id(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) xfs_dqid_t *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) struct xfs_inode *quotip = xfs_quota_inode(mp, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) xfs_dqid_t next_id = *id + 1; /* simple advance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) uint lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct xfs_bmbt_irec got;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct xfs_iext_cursor cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) xfs_fsblock_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /* If we'd wrap past the max ID, stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (next_id < *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* If new ID is within the current chunk, advancing it sufficed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (next_id % mp->m_quotainfo->qi_dqperchunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) *id = next_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /* Nope, next_id is now past the current chunk, so find the next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) lock_flags = xfs_ilock_data_map_shared(quotip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (xfs_iext_lookup_extent(quotip, "ip->i_df, start, &cur, &got)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) /* contiguous chunk, bump startoff for the id calculation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (got.br_startoff < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) got.br_startoff = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) xfs_iunlock(quotip, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * Look up the dquot in the in-core cache. If found, the dquot is returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * locked and ready to go.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static struct xfs_dquot *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) xfs_qm_dqget_cache_lookup(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct xfs_quotainfo *qi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct radix_tree_root *tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) xfs_dqid_t id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) mutex_lock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) dqp = radix_tree_lookup(tree, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (!dqp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) mutex_unlock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) XFS_STATS_INC(mp, xs_qm_dqcachemisses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) xfs_dqlock(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (dqp->q_flags & XFS_DQFLAG_FREEING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) xfs_dqunlock(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) mutex_unlock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) trace_xfs_dqget_freeing(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) delay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) dqp->q_nrefs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) mutex_unlock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) trace_xfs_dqget_hit(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) XFS_STATS_INC(mp, xs_qm_dqcachehits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * Try to insert a new dquot into the in-core cache. If an error occurs the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * caller should throw away the dquot and start over. Otherwise, the dquot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * is returned locked (and held by the cache) as if there had been a cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * hit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) xfs_qm_dqget_cache_insert(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct xfs_quotainfo *qi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct radix_tree_root *tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) xfs_dqid_t id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct xfs_dquot *dqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) mutex_lock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) error = radix_tree_insert(tree, id, dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (unlikely(error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /* Duplicate found! Caller must try again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) WARN_ON(error != -EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) mutex_unlock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) trace_xfs_dqget_dup(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /* Return a locked dquot to the caller, with a reference taken. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) xfs_dqlock(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) dqp->q_nrefs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) qi->qi_dquots++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) mutex_unlock(&qi->qi_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) /* Check our input parameters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) xfs_qm_dqget_checks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) xfs_dqtype_t type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) case XFS_DQTYPE_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (!XFS_IS_UQUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) case XFS_DQTYPE_GROUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (!XFS_IS_GQUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) case XFS_DQTYPE_PROJ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (!XFS_IS_PQUOTA_ON(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) WARN_ON_ONCE(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * locked dquot, doing an allocation (if requested) as needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) xfs_qm_dqget(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) xfs_dqid_t id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) bool can_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct xfs_dquot **O_dqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct xfs_quotainfo *qi = mp->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) error = xfs_qm_dqget_checks(mp, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (dqp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) *O_dqpp = dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Duplicate found. Just throw away the new dquot and start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) xfs_qm_dqdestroy(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) XFS_STATS_INC(mp, xs_qm_dquot_dups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) trace_xfs_dqget_miss(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *O_dqpp = dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * Given a dquot id and type, read and initialize a dquot from the on-disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * metadata. This function is only for use during quota initialization so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) xfs_qm_dqget_uncached(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) xfs_dqid_t id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct xfs_dquot **dqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) error = xfs_qm_dqget_checks(mp, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return xfs_qm_dqread(mp, id, type, 0, dqpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* Return the quota id for a given inode and type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) xfs_dqid_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) xfs_qm_id_for_quotatype(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct xfs_inode *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) xfs_dqtype_t type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) case XFS_DQTYPE_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return i_uid_read(VFS_I(ip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) case XFS_DQTYPE_GROUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return i_gid_read(VFS_I(ip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) case XFS_DQTYPE_PROJ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return ip->i_d.di_projid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
/*
 * Return the dquot for a given inode and type.  If @can_alloc is true, then
 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 * have already had a dquot of this type attached.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	/* Map the quota type to the inode's matching id (uid/gid/projid). */
	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	/* Fast path: cache hit returns the dquot locked and referenced. */
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * A dquot could be attached to this inode by now, since we had
	 * dropped the ilock.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			/* Lost the race: use the attached dquot, drop ours. */
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* Quota was switched off while we slept; nothing to return. */
		/* inode stays locked on return */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	/* Publish the freshly read dquot in the cache radix tree. */
	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Starting at @id and progressing upwards, look for an initialized incore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * dquot, lock it, and return it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) xfs_qm_dqget_next(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) xfs_dqid_t id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct xfs_dquot **dqpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct xfs_dquot *dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *dqpp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) error = xfs_qm_dqget(mp, id, type, false, &dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (error == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) else if (error != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) *dqpp = dqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) xfs_qm_dqput(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * Release a reference to the dquot (decrement ref-count) and unlock it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * If there is a group quota attached to this dquot, carefully release that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * too without tripping over deadlocks'n'stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) xfs_qm_dqput(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct xfs_dquot *dqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ASSERT(dqp->q_nrefs > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ASSERT(XFS_DQ_IS_LOCKED(dqp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) trace_xfs_dqput(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (--dqp->q_nrefs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) trace_xfs_dqput_free(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) xfs_dqunlock(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
/*
 * Release a dquot reference.  @dqp may be NULL, in which case this is a
 * no-op.  The dquot must be unlocked on entry; the lock is taken here
 * before handing off to xfs_qm_dqput().
 */
void
xfs_qm_dqrele(
	struct xfs_dquot	*dqp)
{
	if (dqp) {
		trace_xfs_dqrele(dqp);

		xfs_dqlock(dqp);
		/*
		 * Deliberately do not flush a dirty dquot here; doing so on
		 * every release would cause stutter.  Dirty dquots are
		 * written back later by reclaim, and sync shares the burden.
		 */
		xfs_qm_dqput(dqp);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
static void
xfs_qm_dqflush_done(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qip = (struct xfs_dq_logitem *)lip;
	struct xfs_dquot	*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;
	xfs_lsn_t		tail_lsn;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
	    ((lip->li_lsn == qip->qli_flush_lsn) ||
	     test_bit(XFS_LI_FAILED, &lip->li_flags))) {

		spin_lock(&ailp->ail_lock);
		/* Writeback succeeded, so clear any prior I/O failure state. */
		xfs_clear_li_failed(lip);
		if (lip->li_lsn == qip->qli_flush_lsn) {
			/* xfs_ail_update_finish() drops the AIL lock */
			tail_lsn = xfs_ail_delete_one(ailp, lip);
			xfs_ail_update_finish(ailp, tail_lsn);
		} else {
			/* Re-logged since the flush began; stays in the AIL. */
			spin_unlock(&ailp->ail_lock);
		}
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) xfs_buf_dquot_iodone(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct xfs_log_item *lip, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) list_del_init(&lip->li_bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) xfs_qm_dqflush_done(lip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) xfs_buf_dquot_io_fail(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct xfs_log_item *lip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) spin_lock(&bp->b_mount->m_ail->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) xfs_set_li_failed(lip, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) spin_unlock(&bp->b_mount->m_ail->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* Check incore dquot for errors before we flush. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static xfs_failaddr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) xfs_qm_dqflush_check(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct xfs_dquot *dqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) xfs_dqtype_t type = xfs_dquot_type(dqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (type != XFS_DQTYPE_USER &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) type != XFS_DQTYPE_GROUP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) type != XFS_DQTYPE_PROJ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (dqp->q_id == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) !dqp->q_blk.timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) !dqp->q_ino.timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) !dqp->q_rtb.timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* bigtime flag should never be set on root dquots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!xfs_sb_version_hasbigtime(&dqp->q_mount->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (dqp->q_id == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 *
 * Returns 0 and the locked, attached buffer in @bpp on success; on failure
 * the flush lock is dropped, *bpp is left NULL, and on anything other than
 * a buffer-lock contention (-EAGAIN) the filesystem is shut down.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
	struct xfs_buf		*bp;
	struct xfs_dqblk	*dqblk;
	xfs_failaddr_t		fa;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	/* Wait until the dquot is no longer pinned by the in-core log. */
	xfs_qm_dqunpin_wait(dqp);

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
				   &bp, &xfs_dquot_buf_ops);
	if (error == -EAGAIN)
		/* Buffer lock contended (XBF_TRYLOCK); caller retries later. */
		goto out_unlock;
	if (error)
		goto out_abort;

	/* Sanity-check the incore dquot before it hits the disk buffer. */
	fa = xfs_qm_dqflush_check(dqp);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
				dqp->q_id, fa);
		xfs_buf_relse(bp);
		error = -EFSCORRUPTED;
		goto out_abort;
	}

	/* Flush the incore dquot to the ondisk buffer. */
	dqblk = bp->b_addr + dqp->q_bufoffset;
	xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach the dquot to the buffer so that we can remove this dquot from
	 * the AIL and release the flush lock once the dquot is synced to disk.
	 */
	bp->b_flags |= _XBF_DQUOTS;
	list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_abort:
	/* Unrecoverable error: drop the dirty state and shut the fs down. */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
	xfs_trans_ail_delete(lip, 0);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
	xfs_dqfunlock(dqp);
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) xfs_dqlock2(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct xfs_dquot *d1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct xfs_dquot *d2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (d1 && d2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ASSERT(d1 != d2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (d1->q_id > d2->q_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) mutex_lock(&d2->q_qlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) mutex_lock(&d1->q_qlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) } else if (d1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) mutex_lock(&d1->q_qlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) } else if (d2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) mutex_lock(&d2->q_qlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) xfs_qm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) sizeof(struct xfs_dquot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (!xfs_qm_dqzone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) sizeof(struct xfs_dquot_acct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (!xfs_qm_dqtrxzone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) goto out_free_dqzone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) out_free_dqzone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) kmem_cache_destroy(xfs_qm_dqzone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
/* Tear down the slab caches created by xfs_qm_init(). */
void
xfs_qm_exit(void)
{
	kmem_cache_destroy(xfs_qm_dqtrxzone);
	kmem_cache_destroy(xfs_qm_dqzone);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * Iterate every dquot of a particular type. The caller must ensure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * particular quota type is active. iter_fn can return negative error codes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * or -ECANCELED to indicate that it wants to stop iterating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) xfs_qm_dqiterate(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) xfs_dqtype_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) xfs_qm_dqiterate_fn iter_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct xfs_dquot *dq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) xfs_dqid_t id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) error = xfs_qm_dqget_next(mp, id, type, &dq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (error == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) error = iter_fn(dq, type, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) id = dq->q_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) xfs_qm_dqput(dq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) } while (error == 0 && id != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }