^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Quota change tags are associated with each transaction that allocates or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * deallocates space. Those changes are accumulated locally to each node (in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * per-node file) and then are periodically synced to the quota file. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * avoids the bottleneck of constantly touching the quota file, but introduces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * fuzziness in the current usage value of IDs that are being used on different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * Since quota tags are part of transactions, there is no need for a quota check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * program to be run on node crashes or anything like that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
 * There are a couple of knobs that let the administrator manage the quota
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * fuzziness. "quota_quantum" sets the maximum time a quota change can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * sitting on one node before being synced to the quota file. (The default is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * of quota file syncs increases as the user moves closer to their limit. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * more frequent the syncs, the more accurate the quota enforcement, but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * means that there is more contention between the nodes for the quota file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * The default value is one. This sets the maximum theoretical quota overrun
 * (with infinitely many nodes of infinite bandwidth) to twice the user's limit. (In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * practice, the maximum overrun you see should be much less.) A "quota_scale"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * number greater than one makes quota syncs more frequent and reduces the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * maximum overrun. Numbers less than one (but greater than zero) make quota
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * syncs less frequent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * the quota file, so it is not being constantly read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/buffer_head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/bio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <linux/gfs2_ondisk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/freezer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/quota.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <linux/dqblk_xfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/lockref.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include <linux/list_lru.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include <linux/rcupdate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include <linux/rculist_bl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include <linux/bit_spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include <linux/jhash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include "gfs2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include "incore.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include "bmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #include "glock.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #include "glops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #include "log.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #include "meta_io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #include "quota.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include "rgrp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #include "super.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #include "trans.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #include "inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #include "util.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define GFS2_QD_HASH_SHIFT 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/* -> sd_bitmap_lock */
/* Protects sd_quota_list membership (see qd_get()/gfs2_qd_dispose()). */
static DEFINE_SPINLOCK(qd_lock);
/* Global LRU of unreferenced quota data objects, reaped by the shrinker. */
struct list_lru gfs2_qd_lru;

/* Global hash table of quota data objects, keyed on (sbd, kqid). */
static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) const struct kqid qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) unsigned int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) h = jhash(&qid, sizeof(struct kqid), h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return h & GFS2_QD_HASH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) static inline void spin_lock_bucket(unsigned int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) hlist_bl_lock(&qd_hash_table[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static inline void spin_unlock_bucket(unsigned int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) hlist_bl_unlock(&qd_hash_table[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) static void gfs2_qd_dealloc(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) kmem_cache_free(gfs2_quotad_cachep, qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) static void gfs2_qd_dispose(struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct gfs2_sbd *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) while (!list_empty(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) sdp = qd->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) list_del(&qd->qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* Free from the filesystem-specific list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) list_del(&qd->qd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) spin_lock_bucket(qd->qd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) hlist_bl_del_rcu(&qd->qd_hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) spin_unlock_bucket(qd->qd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) gfs2_assert_warn(sdp, !qd->qd_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) gfs2_assert_warn(sdp, !qd->qd_slot_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) gfs2_assert_warn(sdp, !qd->qd_bh_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) gfs2_glock_put(qd->qd_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) atomic_dec(&sdp->sd_quota_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /* Delete it from the common reclaim list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) static enum lru_status gfs2_qd_isolate(struct list_head *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) struct list_head *dispose = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) if (!spin_trylock(&qd->qd_lockref.lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) return LRU_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) if (qd->qd_lockref.count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) lockref_mark_dead(&qd->qd_lockref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) list_lru_isolate_move(lru, &qd->qd_lru, dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) spin_unlock(&qd->qd_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) return LRU_REMOVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) LIST_HEAD(dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) unsigned long freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) if (!(sc->gfp_mask & __GFP_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) return SHRINK_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) gfs2_qd_isolate, &dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) gfs2_qd_dispose(&dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
/* Shrinker for the global quota data LRU; NUMA aware like the LRU itself. */
struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static u64 qd2index(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) struct kqid qid = qd->qd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) return (2 * (u64)from_kqid(&init_user_ns, qid)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) ((qid.type == USRQUOTA) ? 0 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) static u64 qd2offset(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) u64 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) offset = qd2index(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) offset *= sizeof(struct gfs2_quota);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if (!qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) qd->qd_sbd = sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) qd->qd_lockref.count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) spin_lock_init(&qd->qd_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) qd->qd_id = qid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) qd->qd_slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) INIT_LIST_HEAD(&qd->qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) qd->qd_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) error = gfs2_glock_get(sdp, qd2index(qd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) &gfs2_quota_glops, CREATE, &qd->qd_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) return qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) kmem_cache_free(gfs2_quotad_cachep, qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) const struct gfs2_sbd *sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) struct kqid qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) struct hlist_bl_node *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (!qid_eq(qd->qd_id, qid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (qd->qd_sbd != sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (lockref_get_not_dead(&qd->qd_lockref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) return qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
/*
 * qd_get - find or create the quota data object for the given id.
 *
 * First tries a lockless RCU lookup in the hash table.  On a miss, a
 * new object is allocated and the lookup is repeated under qd_lock and
 * the bucket lock before inserting, so a racing qd_get() cannot insert
 * a duplicate.  Returns 0 with a referenced object in *qdp, or -ENOMEM
 * if allocation failed.
 */
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	/* Somebody may have beaten us to it; look again under the locks. */
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		/* Lost the race; discard the object we allocated. */
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) static void qd_hold(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) lockref_get(&qd->qd_lockref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) static void qd_put(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) if (lockref_put_or_lock(&qd->qd_lockref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) qd->qd_lockref.count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) spin_unlock(&qd->qd_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static int slot_get(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) struct gfs2_sbd *sdp = qd->qd_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) spin_lock(&sdp->sd_bitmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (qd->qd_slot_count != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) error = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) if (bit < sdp->sd_quota_slots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) set_bit(bit, sdp->sd_quota_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) qd->qd_slot = bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) qd->qd_slot_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) spin_unlock(&sdp->sd_bitmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static void slot_hold(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) struct gfs2_sbd *sdp = qd->qd_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) spin_lock(&sdp->sd_bitmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) gfs2_assert(sdp, qd->qd_slot_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) qd->qd_slot_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) spin_unlock(&sdp->sd_bitmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) static void slot_put(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) struct gfs2_sbd *sdp = qd->qd_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) spin_lock(&sdp->sd_bitmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) gfs2_assert(sdp, qd->qd_slot_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (!--qd->qd_slot_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) qd->qd_slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) spin_unlock(&sdp->sd_bitmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
/*
 * bh_get - acquire the buffer that holds this qd's quota change entry.
 *
 * The quota change (qc) inode stores a struct gfs2_quota_change per
 * slot.  Map the qc-file block containing qd->qd_slot's entry, read
 * it, verify the metadata type, and point qd->qd_bh_qc at the entry
 * inside the buffer.  Reference counted via qd_bh_count under
 * sd_quota_mutex; only the first caller performs the read.  Returns 0
 * or a negative errno (-EIO on a metatype mismatch).
 */
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		/* Buffer already held; just take another reference. */
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	/* Locate the qc-file block and the entry's index within it. */
	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = BIT(ip->i_inode.i_blkbits);
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	/* Undo the count taken above so a later caller retries the read. */
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) static void bh_put(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) mutex_lock(&sdp->sd_quota_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) gfs2_assert(sdp, qd->qd_bh_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) if (!--qd->qd_bh_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) brelse(qd->qd_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) qd->qd_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) qd->qd_bh_qc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) mutex_unlock(&sdp->sd_quota_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
/*
 * qd_check_sync - decide whether a qd should be synced now.
 *
 * Called under qd_lock (see qd_fish()).  Returns 0 when the qd is
 * already being synced (QDF_LOCKED), has no pending change
 * (!QDF_CHANGE), was already synced at or beyond *sync_gen, or is
 * dead.  Otherwise a reference is taken, the qd is moved to the tail
 * of the superblock's quota list, marked QDF_LOCKED, the pending
 * change is snapshotted into qd_change_sync, its slot is pinned, and 1
 * is returned.  On success the caller owns a qd reference and a slot
 * reference, released via qd_unlock().
 */
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	/* Don't resurrect a qd the shrinker has already marked dead. */
	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
/*
 * qd_fish - find one quota data object that needs syncing.
 *
 * Scans the superblock's quota list for an entry with pending changes
 * (see qd_check_sync()), acquires its quota change buffer, and returns
 * it in *qdp (NULL when nothing needs syncing).  Returns 0 or a
 * negative errno from bh_get().
 */
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	/* Nothing to sync on a read-only mount. */
	if (sb_rdonly(sdp->sd_vfs))
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			/* Undo everything qd_check_sync() set up. */
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) static void qd_unlock(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) test_bit(QDF_LOCKED, &qd->qd_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) clear_bit(QDF_LOCKED, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) bh_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) slot_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) qd_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) struct gfs2_quota_data **qdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) error = qd_get(sdp, qid, qdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) error = slot_get(*qdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) error = bh_get(*qdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) goto fail_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) fail_slot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) slot_put(*qdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) qd_put(*qdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) static void qdsb_put(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) bh_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) slot_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) qd_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * gfs2_qa_get - make sure we have a quota allocations data structure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * @ip: the inode for this reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) int gfs2_qa_get(struct gfs2_inode *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) down_write(&ip->i_rw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (ip->i_qadata == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (!ip->i_qadata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) ip->i_qadata->qa_ref++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) up_write(&ip->i_rw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) void gfs2_qa_put(struct gfs2_inode *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) down_write(&ip->i_rw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) ip->i_qadata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) up_write(&ip->i_rw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct gfs2_quota_data **qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) error = gfs2_qa_get(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) qd = ip->i_qadata->qa_qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) goto out_unhold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) ip->i_qadata->qa_qd_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) qd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) goto out_unhold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) ip->i_qadata->qa_qd_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) qd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) !uid_eq(uid, ip->i_inode.i_uid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) error = qdsb_get(sdp, make_kqid_uid(uid), qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) goto out_unhold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) ip->i_qadata->qa_qd_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) qd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) !gid_eq(gid, ip->i_inode.i_gid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) error = qdsb_get(sdp, make_kqid_gid(gid), qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) goto out_unhold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) ip->i_qadata->qa_qd_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) qd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) out_unhold:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) gfs2_quota_unhold(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) void gfs2_quota_unhold(struct gfs2_inode *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) u32 x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (ip->i_qadata == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) qdsb_put(ip->i_qadata->qa_qd[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) ip->i_qadata->qa_qd[x] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) ip->i_qadata->qa_qd_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) gfs2_qa_put(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) static int sort_qd(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (qid_lt(qd_a->qd_id, qd_b->qd_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (qid_lt(qd_b->qd_id, qd_a->qd_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) static void do_qc(struct gfs2_quota_data *qd, s64 change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct gfs2_quota_change *qc = qd->qd_bh_qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) s64 x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) mutex_lock(&sdp->sd_quota_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) qc->qc_change = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) qc->qc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (qd->qd_id.type == USRQUOTA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) x = be64_to_cpu(qc->qc_change) + change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) qc->qc_change = cpu_to_be64(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) qd->qd_change = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (!x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) clear_bit(QDF_CHANGE, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) qc->qc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) qc->qc_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) slot_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) qd_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) qd_hold(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) slot_hold(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (change < 0) /* Reset quiet flag if we freed some blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) mutex_unlock(&sdp->sd_quota_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) unsigned off, void *buf, unsigned bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct inode *inode = &ip->i_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct gfs2_sbd *sdp = GFS2_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) u64 blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) unsigned to_write = bytes, pg_off = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) boff = off % bsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) page = find_or_create_page(mapping, index, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (!page_has_buffers(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) create_empty_buffers(page, bsize, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) bh = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) while (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* Find the beginning block within the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (pg_off >= ((bnum * bsize) + bsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) bnum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) blk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (!buffer_mapped(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) gfs2_block_map(inode, blk, bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (!buffer_mapped(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /* If it's a newly allocated disk block, zero it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (buffer_new(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) zero_user(page, bnum * bsize, bh->b_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (!buffer_uptodate(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) wait_on_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (!buffer_uptodate(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (gfs2_is_jdata(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) gfs2_trans_add_data(ip->i_gl, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) gfs2_ordered_add_inode(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* If we need to write to the next block as well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (to_write > (bsize - boff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) pg_off += (bsize - boff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) to_write -= (bsize - boff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) boff = pg_off % bsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /* Write to the page, now that we have setup the buffer(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) memcpy(kaddr + off, buf, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) loff_t loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) unsigned long pg_beg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) unsigned pg_off, nbytes, overflow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) int pg_oflow = 0, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) nbytes = sizeof(struct gfs2_quota);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) pg_beg = loc >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) pg_off = offset_in_page(loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* If the quota straddles a page boundary, split the write in two */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if ((pg_off + nbytes) > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) pg_oflow = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) overflow = (pg_off + nbytes) - PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ptr = qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) nbytes - overflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /* If there's an overflow, write the remaining bytes to the next page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (!error && pg_oflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) ptr + nbytes - overflow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) overflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * gfs2_adjust_quota - adjust record of current block usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * @ip: The quota inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * @loc: Offset of the entry in the quota file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * @change: The amount of usage change to record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * @qd: The quota data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * @fdq: The updated limits to record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * This function was mostly borrowed from gfs2_block_truncate_page which was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * in turn mostly borrowed from ext3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * Returns: 0 or -ve on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) s64 change, struct gfs2_quota_data *qd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct qc_dqblk *fdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct inode *inode = &ip->i_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct gfs2_sbd *sdp = GFS2_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct gfs2_quota q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u64 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (gfs2_is_stuffed(ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) err = gfs2_unstuff_dinode(ip, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) memset(&q, 0, sizeof(struct gfs2_quota));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) be64_add_cpu(&q.qu_value, change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (((s64)be64_to_cpu(q.qu_value)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) q.qu_value = 0; /* Never go negative on quota usage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) qd->qd_qb.qb_value = q.qu_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (fdq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (fdq->d_fieldmask & QC_SPC_SOFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) qd->qd_qb.qb_warn = q.qu_warn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (fdq->d_fieldmask & QC_SPC_HARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) qd->qd_qb.qb_limit = q.qu_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (fdq->d_fieldmask & QC_SPACE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) qd->qd_qb.qb_value = q.qu_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) err = gfs2_write_disk_quota(ip, &q, loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) size = loc + sizeof(struct gfs2_quota);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (size > inode->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) i_size_write(inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) inode->i_mtime = inode->i_atime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) set_bit(QDF_REFRESH, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct gfs2_alloc_parms ap = { .aflags = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) unsigned int data_blocks, ind_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct gfs2_holder *ghs, i_gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) unsigned int qx, x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) unsigned reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) loff_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) unsigned int nalloc = 0, blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) error = gfs2_qa_get(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) &data_blocks, &ind_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!ghs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) inode_lock(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) for (qx = 0; qx < num_qd; qx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) GL_NOCACHE, &ghs[qx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) goto out_dq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) goto out_dq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) for (x = 0; x < num_qd; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) offset = qd2offset(qda[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (gfs2_write_alloc_required(ip, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) sizeof(struct gfs2_quota)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) nalloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * 1 blk for unstuffing inode if stuffed. We add this extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * block to the reservation unconditionally. If the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * doesn't need unstuffing, the block will be released to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * rgrp since it won't be allocated during the transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* +3 in the end for unstuffing block, inode size update block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * and another block in case quota straddles page boundary and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * two blocks need to be updated instead of 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) reserved = 1 + (nalloc * (data_blocks + ind_blocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ap.target = reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) error = gfs2_inplace_reserve(ip, &ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) goto out_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (nalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) error = gfs2_trans_begin(sdp, blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) goto out_ipres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) for (x = 0; x < num_qd; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) qd = qda[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) offset = qd2offset(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) goto out_end_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) do_qc(qd, -qd->qd_change_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) set_bit(QDF_REFRESH, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) out_end_trans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) gfs2_trans_end(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) out_ipres:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) gfs2_inplace_release(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) out_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) gfs2_glock_dq_uninit(&i_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) out_dq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) while (qx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) gfs2_glock_dq_uninit(&ghs[qx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) inode_unlock(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) kfree(ghs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) gfs2_qa_put(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct gfs2_quota q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct gfs2_quota_lvb *qlvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) loff_t pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) memset(&q, 0, sizeof(struct gfs2_quota));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) pos = qd2offset(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) qlvb->__pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) qlvb->qb_limit = q.qu_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) qlvb->qb_warn = q.qu_warn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) qlvb->qb_value = q.qu_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) qd->qd_qb = *qlvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct gfs2_holder *q_gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct gfs2_holder i_gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) force_refresh = FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) gfs2_glock_dq_uninit(q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) GL_NOCACHE, q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) error = update_qd(sdp, qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) goto fail_gunlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) gfs2_glock_dq_uninit(&i_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) gfs2_glock_dq_uninit(q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) force_refresh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) fail_gunlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) gfs2_glock_dq_uninit(&i_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) gfs2_glock_dq_uninit(q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) u32 x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) error = gfs2_quota_hold(ip, uid, gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) sizeof(struct gfs2_quota_data *), sort_qd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) qd = ip->i_qadata->qa_qd[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) set_bit(GIF_QD_LOCKED, &ip->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) while (x--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) gfs2_quota_unhold(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static int need_sync(struct gfs2_quota_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct gfs2_tune *gt = &sdp->sd_tune;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) s64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) unsigned int num, den;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int do_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!qd->qd_qb.qb_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) value = qd->qd_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) spin_lock(>->gt_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) num = gt->gt_quota_scale_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) den = gt->gt_quota_scale_den;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) spin_unlock(>->gt_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (value < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) do_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) (s64)be64_to_cpu(qd->qd_qb.qb_limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) do_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) value *= gfs2_jindex_size(sdp) * num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) value = div_s64(value, den);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) do_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return do_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) void gfs2_quota_unlock(struct gfs2_inode *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct gfs2_quota_data *qda[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) unsigned int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) u32 x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) int found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) qd = ip->i_qadata->qa_qd[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) sync = need_sync(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) found = qd_check_sync(sdp, qd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) gfs2_assert_warn(sdp, qd->qd_change_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (bh_get(qd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) clear_bit(QDF_LOCKED, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) slot_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) qd_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) qda[count++] = qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) do_sync(count, qda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) for (x = 0; x < count; x++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) qd_unlock(qda[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) gfs2_quota_unhold(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) #define MAX_LINE 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static int print_message(struct gfs2_quota_data *qd, char *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) fs_info(sdp, "quota %s for %s %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) (qd->qd_id.type == USRQUOTA) ? "user" : "group",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) from_kqid(&init_user_ns, qd->qd_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * gfs2_quota_check - check if allocating new blocks will exceed quota
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * @ip: The inode for which this check is being performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * @uid: The uid to check against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * @gid: The gid to check against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * @ap: The allocation parameters. ap->target contains the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * blocks. ap->min_target, if set, contains the minimum blks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * Returns: 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * min_req = ap->min_target ? ap->min_target : ap->target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * quota must allow at least min_req blks for success and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * ap->allowed is set to the number of blocks allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * -EDQUOT otherwise, quota violation. ap->allowed is set to number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * of blocks available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct gfs2_alloc_parms *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) s64 value, warn, limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) u32 x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) qd = ip->i_qadata->qa_qd[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) qid_eq(qd->qd_id, make_kqid_gid(gid))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) value += qd->qd_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (limit > 0 && (limit - value) < ap->allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) ap->allowed = limit - value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* If we can't meet the target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (limit && limit < (value + (s64)ap->target)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /* If no min_target specified or we don't meet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * min_target, return -EDQUOT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!ap->min_target || ap->min_target > ap->allowed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (!test_and_set_bit(QDF_QMSG_QUIET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) &qd->qd_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) print_message(qd, "exceeded");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) quota_send_warning(qd->qd_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) sdp->sd_vfs->s_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) QUOTA_NL_BHARDWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) error = -EDQUOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) } else if (warn && warn < value &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) time_after_eq(jiffies, qd->qd_last_warn +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) gfs2_tune_get(sdp, gt_quota_warn_period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) quota_send_warning(qd->qd_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) error = print_message(qd, "warning");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) qd->qd_last_warn = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) kuid_t uid, kgid_t gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) u32 x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) gfs2_assert_warn(sdp, change))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (ip->i_diskflags & GFS2_DIF_SYSTEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) ip->i_qadata->qa_ref > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) qd = ip->i_qadata->qa_qd[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) qid_eq(qd->qd_id, make_kqid_gid(gid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) do_qc(qd, change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int gfs2_quota_sync(struct super_block *sb, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct gfs2_sbd *sdp = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct gfs2_quota_data **qda;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) unsigned int num_qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) unsigned int x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (!qda)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) mutex_lock(&sdp->sd_quota_sync_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) sdp->sd_quota_sync_gen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) num_qd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) error = qd_fish(sdp, qda + num_qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (error || !qda[num_qd])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (++num_qd == max_qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (num_qd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) error = do_sync(num_qd, qda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) for (x = 0; x < num_qd; x++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) qda[x]->qd_sync_gen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) sdp->sd_quota_sync_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) for (x = 0; x < num_qd; x++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) qd_unlock(qda[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) } while (!error && num_qd == max_qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) mutex_unlock(&sdp->sd_quota_sync_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) kfree(qda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct gfs2_holder q_gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) error = qd_get(sdp, qid, &qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) error = do_glock(qd, FORCE, &q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) gfs2_glock_dq_uninit(&q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) qd_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int gfs2_quota_init(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) u64 size = i_size_read(sdp->sd_qc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) unsigned int x, slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) unsigned int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) unsigned int hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) unsigned int bm_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) u64 dblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) u32 extlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) bm_size *= sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (sdp->sd_quota_bitmap == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (!sdp->sd_quota_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) for (x = 0; x < blocks; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) const struct gfs2_quota_change *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) unsigned int y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!extlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int new = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) y++, slot++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) s64 qc_change = be64_to_cpu(qc->qc_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) u32 qc_flags = be32_to_cpu(qc->qc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) USRQUOTA : GRPQUOTA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct kqid qc_id = make_kqid(&init_user_ns, qtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) be32_to_cpu(qc->qc_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) qc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (!qc_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) hash = gfs2_qd_hash(sdp, qc_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) qd = qd_alloc(hash, sdp, qc_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (qd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) set_bit(QDF_CHANGE, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) qd->qd_change = qc_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) qd->qd_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) qd->qd_slot_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) list_add(&qd->qd_list, &sdp->sd_quota_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) atomic_inc(&sdp->sd_quota_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) spin_lock_bucket(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) spin_unlock_bucket(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) dblock++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) extlen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) fs_info(sdp, "found %u quota changes\n", found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) gfs2_quota_cleanup(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct list_head *head = &sdp->sd_quota_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) while (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) list_del(&qd->qd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* Also remove if this qd exists in the reclaim list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) atomic_dec(&sdp->sd_quota_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) spin_lock_bucket(qd->qd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) hlist_bl_del_rcu(&qd->qd_hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) spin_unlock_bucket(qd->qd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) gfs2_assert_warn(sdp, !qd->qd_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) gfs2_assert_warn(sdp, !qd->qd_slot_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) gfs2_assert_warn(sdp, !qd->qd_bh_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) gfs2_glock_put(qd->qd_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) spin_lock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) spin_unlock(&qd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) kvfree(sdp->sd_quota_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) sdp->sd_quota_bitmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (error == 0 || error == -EROFS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (!gfs2_withdrawn(sdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (!cmpxchg(&sdp->sd_log_error, 0, error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) wake_up(&sdp->sd_logd_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) int (*fxn)(struct super_block *sb, int type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) unsigned long t, unsigned long *timeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) unsigned int *new_timeo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (t >= *timeo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) int error = fxn(sdp->sd_vfs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) quotad_error(sdp, msg, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) *timeo -= t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct gfs2_inode *ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) while(1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) ip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) spin_lock(&sdp->sd_trunc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (!list_empty(&sdp->sd_trunc_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ip = list_first_entry(&sdp->sd_trunc_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct gfs2_inode, i_trunc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) list_del_init(&ip->i_trunc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) spin_unlock(&sdp->sd_trunc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (ip == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) gfs2_glock_finish_truncate(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (!sdp->sd_statfs_force_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) sdp->sd_statfs_force_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) wake_up(&sdp->sd_quota_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * gfs2_quotad - Write cached quota changes into the quota file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * @sdp: Pointer to GFS2 superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) int gfs2_quotad(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct gfs2_sbd *sdp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct gfs2_tune *tune = &sdp->sd_tune;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) unsigned long statfs_timeo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) unsigned long quotad_timeo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) unsigned long t = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) int empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (gfs2_withdrawn(sdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) goto bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /* Update the master statfs file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (sdp->sd_statfs_force_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) quotad_error(sdp, "statfs", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) &statfs_timeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) &tune->gt_statfs_quantum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* Update quota file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) "ad_timeo, &tune->gt_quota_quantum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /* Check for & recover partially truncated inodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) quotad_check_trunc_list(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) try_to_freeze();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) bypass:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) t = min(quotad_timeo, statfs_timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) spin_lock(&sdp->sd_trunc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) empty = list_empty(&sdp->sd_trunc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) spin_unlock(&sdp->sd_trunc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (empty && !sdp->sd_statfs_force_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) t -= schedule_timeout(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) t = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) finish_wait(&sdp->sd_quota_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) struct gfs2_sbd *sdp = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) memset(state, 0, sizeof(*state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) switch (sdp->sd_args.ar_quota) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) case GFS2_QUOTA_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) case GFS2_QUOTA_ACCOUNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) QCI_SYSFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) QCI_SYSFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) case GFS2_QUOTA_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (sdp->sd_quota_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) state->s_state[USRQUOTA].ino =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) GFS2_I(sdp->sd_quota_inode)->i_no_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) state->s_state[USRQUOTA].nextents = 1; /* unsupported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) struct qc_dqblk *fdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) struct gfs2_sbd *sdp = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct gfs2_quota_lvb *qlvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) struct gfs2_holder q_gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) memset(fdq, 0, sizeof(*fdq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) return -ESRCH; /* Crazy XFS error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if ((qid.type != USRQUOTA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) (qid.type != GRPQUOTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) error = qd_get(sdp, qid, &qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) error = do_glock(qd, FORCE, &q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) gfs2_glock_dq_uninit(&q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) qd_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /* GFS2 only supports a subset of the XFS fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) #define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct qc_dqblk *fdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct gfs2_sbd *sdp = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct gfs2_quota_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct gfs2_holder q_gh, i_gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) unsigned int data_blocks, ind_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) unsigned int blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int alloc_required;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) loff_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return -ESRCH; /* Crazy XFS error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if ((qid.type != USRQUOTA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) (qid.type != GRPQUOTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) error = qd_get(sdp, qid, &qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) error = gfs2_qa_get(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) inode_lock(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) goto out_unlockput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) goto out_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* Check for existing entry, if none then alloc new blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) error = update_qd(sdp, qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) goto out_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) /* If nothing has changed, this is a no-op */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) fdq->d_fieldmask ^= QC_SPC_SOFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if ((fdq->d_fieldmask & QC_SPC_HARD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) fdq->d_fieldmask ^= QC_SPC_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if ((fdq->d_fieldmask & QC_SPACE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) fdq->d_fieldmask ^= QC_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (fdq->d_fieldmask == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) goto out_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) offset = qd2offset(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (gfs2_is_stuffed(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) alloc_required = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (alloc_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) struct gfs2_alloc_parms ap = { .aflags = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) &data_blocks, &ind_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) blocks = 1 + data_blocks + ind_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) ap.target = blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) error = gfs2_inplace_reserve(ip, &ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) goto out_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) blocks += gfs2_rg_blocks(ip, blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /* Some quotas span block boundaries and can update two blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) adding an extra block to the transaction to handle such quotas */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) /* Apply changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) gfs2_trans_end(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (alloc_required)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) gfs2_inplace_release(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) out_i:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) gfs2_glock_dq_uninit(&i_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) out_q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) gfs2_glock_dq_uninit(&q_gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) out_unlockput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) gfs2_qa_put(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) inode_unlock(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) qd_put(qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) const struct quotactl_ops gfs2_quotactl_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) .quota_sync = gfs2_quota_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) .get_state = gfs2_quota_get_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) .get_dqblk = gfs2_get_dqblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) .set_dqblk = gfs2_set_dqblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) void __init gfs2_quota_hash_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }