/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS	12
#define NFSD_FILE_HASH_SIZE	(1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY	(2 * HZ)

#define NFSD_FILE_SHUTDOWN	(1)
#define NFSD_FILE_LRU_THRESHOLD	(4096UL)
#define NFSD_FILE_LRU_LIMIT	(NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)

struct nfsd_fcache_bucket {
	struct hlist_head	nfb_head;
	spinlock_t		nfb_lock;
	unsigned int		nfb_count;
	unsigned int		nfb_maxcount;
};

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

struct nfsd_fcache_disposal {
	struct list_head list;
	struct work_struct work;
	struct net *net;
	spinlock_t lock;
	struct list_head freeme;
	struct rcu_head rcu;
};

static struct workqueue_struct *nfsd_filecache_wq __read_mostly;

static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
static struct list_lru			nfsd_file_lru;
static long				nfsd_file_lru_flags;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
static atomic_long_t			nfsd_filecache_count;
static struct delayed_work		nfsd_filecache_laundrette;
static DEFINE_SPINLOCK(laundrette_lock);
static LIST_HEAD(laundrettes);

static void nfsd_file_gc(void);

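/*
 * Kick off a delayed run of the garbage collector unless the cache is
 * empty or we're shutting down.
 */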
static void
nfsd_file_schedule_laundrette(void)
{
	long count = atomic_long_read(&nfsd_filecache_count);

	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
		return;

	queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
			NFSD_LAUNDRETTE_DELAY);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

	put_cred(nf->nf_cred);
	kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
	if (!refcount_inc_not_zero(&nfm->nfm_ref))
		return NULL;
	return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
	if (refcount_dec_and_test(&nfm->nfm_ref)) {
		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
		fsnotify_put_mark(&nfm->nfm_mark);
	}
}

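/*
 * Find the nfsd_file_mark for this inode, or allocate and attach a new
 * one. Returns NULL if the allocation fails.
 */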
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
	int err;
	struct fsnotify_mark *mark;
	struct nfsd_file_mark *nfm = NULL, *new;
	struct inode *inode = nf->nf_inode;

	do {
		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
					  nfsd_file_fsnotify_group);
		if (mark) {
			nfm = nfsd_file_mark_get(container_of(mark,
						 struct nfsd_file_mark,
						 nfm_mark));
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
			if (nfm) {
				fsnotify_put_mark(mark);
				break;
			}
			/* Avoid soft lockup race with nfsd_file_mark_put() */
			fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
			fsnotify_put_mark(mark);
		} else
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

		/* allocate a new nfm */
		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
		if (!new)
			return NULL;
		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
		refcount_set(&new->nfm_ref, 1);

		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

		/*
		 * If the add was successful, then return the object.
		 * Otherwise, we need to put the reference we hold on the
		 * nfm_mark. The fsnotify code will take a reference and put
		 * it on failure, so we can't just free it directly. It's also
		 * not safe to call fsnotify_destroy_mark on it as the
		 * mark->group will be NULL. Thus, we can't let the nfm_ref
		 * counter drive the destruction at this point.
		 */
		if (likely(!err))
			nfm = new;
		else
			fsnotify_put_mark(&new->nfm_mark);
	} while (unlikely(err == -EEXIST));

	return nfm;
}

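/*
 * Allocate and initialize a new nfsd_file. The entry pins the caller's
 * credentials and starts out with a single reference.
 */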
static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
		struct net *net)
{
	struct nfsd_file *nf;

	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
	if (nf) {
		INIT_HLIST_NODE(&nf->nf_node);
		INIT_LIST_HEAD(&nf->nf_lru);
		nf->nf_file = NULL;
		nf->nf_cred = get_current_cred();
		nf->nf_net = net;
		nf->nf_flags = 0;
		nf->nf_inode = inode;
		nf->nf_hashval = hashval;
		refcount_set(&nf->nf_ref, 1);
		nf->nf_may = may & NFSD_FILE_MAY_MASK;
		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
			if (may & NFSD_MAY_WRITE)
				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			if (may & NFSD_MAY_READ)
				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		}
		nf->nf_mark = NULL;
		init_rwsem(&nf->nf_rwsem);
		trace_nfsd_file_alloc(nf);
	}
	return nf;
}

static bool
nfsd_file_free(struct nfsd_file *nf)
{
	bool flush = false;

	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, NULL);
		fput(nf->nf_file);
		flush = true;
	}
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
	return flush;
}

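/*
 * Return true if this file is open for write and still has dirty pages
 * or writeback in flight.
 */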
static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;
	struct address_space *mapping;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return false;
	mapping = file->f_mapping;
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}

static int
nfsd_file_check_write_error(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return 0;
	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}

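/*
 * Remove a file from its hash bucket; the caller must hold the bucket
 * lock. An unflushed write error triggers a reset of the boot verifier.
 */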
static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash(nf);

	if (nfsd_file_check_write_error(nf))
		nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
	hlist_del_rcu(&nf->nf_node);
	atomic_long_dec(&nfsd_filecache_count);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		nfsd_file_do_unhash(nf);
		if (!list_empty(&nf->nf_lru))
			list_lru_del(&nfsd_file_lru, &nf->nf_lru);
		return true;
	}
	return false;
}

/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash_and_release_locked(nf);
	if (!nfsd_file_unhash(nf))
		return false;
	/* keep final reference for nfsd_file_lru_dispose */
	if (refcount_dec_not_one(&nf->nf_ref))
		return true;

	list_add(&nf->nf_lru, dispose);
	return true;
}

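/*
 * Drop a reference without the writeback/GC handling done by
 * nfsd_file_put(). The file is freed once the last reference is put.
 */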
static void
nfsd_file_put_noref(struct nfsd_file *nf)
{
	trace_nfsd_file_put(nf);

	if (refcount_dec_and_test(&nf->nf_ref)) {
		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
		nfsd_file_free(nf);
	}
}

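/*
 * Release a reference to an nfsd_file. When only the hash table's
 * reference would remain, start writeback of any dirty data, schedule
 * the laundrette, and force a GC pass if the cache has grown too large.
 */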
void
nfsd_file_put(struct nfsd_file *nf)
{
	bool is_hashed;

	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
		nfsd_file_put_noref(nf);
		return;
	}

	filemap_flush(nf->nf_file->f_mapping);
	is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
	nfsd_file_put_noref(nf);
	if (is_hashed)
		nfsd_file_schedule_laundrette();
	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
		nfsd_file_gc();
}

struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
	if (likely(refcount_inc_not_zero(&nf->nf_ref)))
		return nf;
	return NULL;
}

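/*
 * Drop the remaining reference on each entry of a dispose list.
 */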
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		nfsd_file_put_noref(nf);
	}
}

static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
	bool flush = false;
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		if (!refcount_dec_and_test(&nf->nf_ref))
			continue;
		if (nfsd_file_free(nf))
			flush = true;
	}
	if (flush)
		flush_delayed_fput();
}

static void
nfsd_file_list_remove_disposal(struct list_head *dst,
		struct nfsd_fcache_disposal *l)
{
	spin_lock(&l->lock);
	list_splice_init(&l->freeme, dst);
	spin_unlock(&l->lock);
}

static void
nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
{
	struct nfsd_fcache_disposal *l;

	rcu_read_lock();
	list_for_each_entry_rcu(l, &laundrettes, list) {
		if (l->net == net) {
			spin_lock(&l->lock);
			list_splice_tail_init(files, &l->freeme);
			spin_unlock(&l->lock);
			queue_work(nfsd_filecache_wq, &l->work);
			break;
		}
	}
	rcu_read_unlock();
}

static void
nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
		struct net *net)
{
	struct nfsd_file *nf, *tmp;

	list_for_each_entry_safe(nf, tmp, src, nf_lru) {
		if (nf->nf_net == net)
			list_move_tail(&nf->nf_lru, dst);
	}
}

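/*
 * Split a dispose list up by network namespace and hand each sub-list to
 * that namespace's disposal work, which closes the files from workqueue
 * context.
 */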
static void
nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
	LIST_HEAD(list);
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
		nfsd_file_list_add_disposal(&list, nf->nf_net);
	}
}

/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 spinlock_t *lock, void *arg)
	__releases(lock)
	__acquires(lock)
{
	struct list_head *head = arg;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/*
	 * Do a lockless refcount check. The hashtable holds one reference, so
	 * we look to see if anything else has a reference, or if any have
	 * been put since the shrinker last ran. Those don't get unhashed and
	 * released.
	 *
	 * Note that in the put path, we set the flag and then decrement the
	 * counter. Here we check the counter and then test and clear the flag.
	 * That order is deliberate to ensure that we can do this locklessly.
	 */
	if (refcount_read(&nf->nf_ref) > 1)
		goto out_skip;

	/*
	 * Don't throw out files that are still undergoing I/O or
	 * that have uncleared errors pending.
	 */
	if (nfsd_file_check_writeback(nf))
		goto out_skip;

	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
		goto out_skip;

	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
		goto out_skip;

	list_lru_isolate_move(lru, &nf->nf_lru, head);
	return LRU_REMOVED;
out_skip:
	return LRU_SKIP;
}

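/*
 * Walk the LRU (bounded by @sc when called from the shrinker, unbounded
 * otherwise), unhash the isolated entries and queue them for delayed
 * disposal. Returns the number of entries reaped.
 */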
static unsigned long
nfsd_file_lru_walk_list(struct shrink_control *sc)
{
	LIST_HEAD(head);
	struct nfsd_file *nf;
	unsigned long ret;

	if (sc)
		ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
				nfsd_file_lru_cb, &head);
	else
		ret = list_lru_walk(&nfsd_file_lru,
				nfsd_file_lru_cb,
				&head, LONG_MAX);
	list_for_each_entry(nf, &head, nf_lru) {
		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
		nfsd_file_do_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
	}
	nfsd_file_dispose_list_delayed(&head);
	return ret;
}

static void
nfsd_file_gc(void)
{
	nfsd_file_lru_walk_list(NULL);
}

static void
nfsd_file_gc_worker(struct work_struct *work)
{
	nfsd_file_gc();
	nfsd_file_schedule_laundrette();
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
	return nfsd_file_lru_walk_list(sc);
}

static struct shrinker nfsd_file_shrinker = {
	.scan_objects = nfsd_file_lru_scan,
	.count_objects = nfsd_file_lru_count,
	.seeks = 1,
};

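/*
 * Unhash and queue for disposal every cached file in this hash bucket
 * that refers to @inode.
 */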
static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
			struct list_head *dispose)
{
	struct nfsd_file *nf;
	struct hlist_node *tmp;

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
		if (inode == nf->nf_inode)
			nfsd_file_unhash_and_release_locked(nf, dispose);
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_sync(&dispose);
}

/**
 * nfsd_file_close_inode - attempt to forcibly close an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Unlike the _sync variant,
 * the disposal is deferred to the per-net disposal workqueue.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_delayed(&dispose);
}

/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: the work_struct embedded in a struct nfsd_fcache_disposal
 *
 * Pull the entries queued on this namespace's disposal list and close them.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
	LIST_HEAD(head);
	struct nfsd_fcache_disposal *l = container_of(work,
			struct nfsd_fcache_disposal, work);

	nfsd_file_list_remove_disposal(&head, l);
	nfsd_file_dispose_list(&head);
}

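/*
 * Notifier callback for lease events: when an F_SETLEASE lease is
 * involved, synchronously close any cached nfsd_files on the inode.
 */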
static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			    void *data)
{
	struct file_lock *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->fl_flags & FL_LEASE)
		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
	return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};

static int
nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask,
				struct inode *inode, struct inode *dir,
				const struct qstr *name, u32 cookie)
{
	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_inode_event = nfsd_file_fsnotify_handle_event,
	.free_mark = nfsd_file_mark_free,
};

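/*
 * Set up the global file cache: workqueue, hash table, slabs, LRU,
 * shrinker, lease notifier and fsnotify group. Calling this while the
 * cache is already initialized is a no-op.
 */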
int
nfsd_file_cache_init(void)
{
	int ret = -ENOMEM;
	unsigned int i;

	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	if (nfsd_file_hashtbl)
		return 0;

	nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
	if (!nfsd_filecache_wq)
		goto out;

	nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE,
				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
	if (!nfsd_file_hashtbl) {
		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
		goto out_err;
	}

	nfsd_file_slab = kmem_cache_create("nfsd_file",
				sizeof(struct nfsd_file), 0, 0, NULL);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
				sizeof(struct nfsd_file_mark), 0, 0, NULL);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}

	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	ret = register_shrinker(&nfsd_file_shrinker);
	if (ret) {
		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
		goto out_lru;
	}

	ret = lease_register_notifier(&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
	}

	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
	return ret;
out_notifier:
	lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
	unregister_shrinker(&nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kvfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	destroy_workqueue(nfsd_filecache_wq);
	nfsd_filecache_wq = NULL;
	goto out;
}

/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
	unsigned int i;
	struct nfsd_file *nf;
	struct hlist_node *next;
	LIST_HEAD(dispose);
	bool del;

	if (!nfsd_file_hashtbl)
		return;

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

		spin_lock(&nfb->nfb_lock);
		hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
			if (net && nf->nf_net != net)
				continue;
			del = nfsd_file_unhash_and_release_locked(nf, &dispose);

			/*
			 * Deadlock detected! Something marked this entry as
			 * unhashed, but hasn't removed it from the hash list.
			 */
			WARN_ON_ONCE(!del);
		}
		spin_unlock(&nfb->nfb_lock);
		nfsd_file_dispose_list(&dispose);
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) static struct nfsd_fcache_disposal *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) nfsd_alloc_fcache_disposal(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) struct nfsd_fcache_disposal *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) l = kmalloc(sizeof(*l), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (!l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) INIT_WORK(&l->work, nfsd_file_delayed_close);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) l->net = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) spin_lock_init(&l->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) INIT_LIST_HEAD(&l->freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) rcu_assign_pointer(l->net, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) cancel_work_sync(&l->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) nfsd_file_dispose_list(&l->freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) kfree_rcu(l, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) spin_lock(&laundrette_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) list_add_tail_rcu(&l->list, &laundrettes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) spin_unlock(&laundrette_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) spin_lock(&laundrette_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) list_del_rcu(&l->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) spin_unlock(&laundrette_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) nfsd_alloc_fcache_disposal_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct nfsd_fcache_disposal *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) l = nfsd_alloc_fcache_disposal(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) nfsd_add_fcache_disposal(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) nfsd_free_fcache_disposal_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct nfsd_fcache_disposal *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) list_for_each_entry_rcu(l, &laundrettes, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (l->net != net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) nfsd_del_fcache_disposal(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) nfsd_free_fcache_disposal(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) nfsd_file_cache_start_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return nfsd_alloc_fcache_disposal_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) nfsd_file_cache_shutdown_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) nfsd_file_cache_purge(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) nfsd_free_fcache_disposal_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
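/*
 * Global teardown, as opposed to the per-net shutdown above. The order
 * matters: stop new shrinker and laundrette activity first, flush the
 * laundrette so no garbage collection is in flight, purge whatever is
 * still hashed, and only then release the fsnotify group, the slabs, the
 * hash table and the workqueue.
 */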
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) nfsd_file_cache_shutdown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) lease_unregister_notifier(&nfsd_file_lease_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unregister_shrinker(&nfsd_file_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * make sure all callers of nfsd_file_lru_cb are done before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * calling nfsd_file_cache_purge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) cancel_delayed_work_sync(&nfsd_filecache_laundrette);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) nfsd_file_cache_purge(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) list_lru_destroy(&nfsd_file_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) fsnotify_put_group(nfsd_file_fsnotify_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) nfsd_file_fsnotify_group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) kmem_cache_destroy(nfsd_file_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) nfsd_file_slab = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) fsnotify_wait_marks_destroyed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) kmem_cache_destroy(nfsd_file_mark_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) nfsd_file_mark_slab = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) kvfree(nfsd_file_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) nfsd_file_hashtbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) destroy_workqueue(nfsd_filecache_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) nfsd_filecache_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
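/*
 * A cached nfsd_file is only reused if the requesting credential looks the
 * same as the one it was opened with: identical fsuid, fsgid and
 * supplementary group list (or both group lists absent).
 */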
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) nfsd_match_cred(const struct cred *c1, const struct cred *c2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (!uid_eq(c1->fsuid, c2->fsuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (!gid_eq(c1->fsgid, c2->fsgid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (c1->group_info == NULL || c2->group_info == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return c1->group_info == c2->group_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (c1->group_info->ngroups != c2->group_info->ngroups)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) for (i = 0; i < c1->group_info->ngroups; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
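/*
 * Look up a cached nfsd_file in the given hash bucket. The caller must hold
 * either rcu_read_lock() or the bucket's nfb_lock (hence the lockdep
 * expression on the list walk). A match must agree on access mode, inode,
 * net namespace and credential, and must still be hashed; on success a
 * reference is taken via nfsd_file_get() before the entry is returned.
 */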
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) unsigned int hashval, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct nfsd_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (nf->nf_may != need)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (nf->nf_inode != inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (nf->nf_net != net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (!nfsd_match_cred(nf->nf_cred, current_cred()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (nfsd_file_get(nf) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * nfsd_file_is_cached - are there any cached open files for this inode?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * @inode: inode of the file to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Scan the hashtable for open files that match this inode. Returns true if there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * are any, and false if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) nfsd_file_is_cached(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct nfsd_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned int hashval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) nf_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (inode == nf->nf_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) trace_nfsd_file_is_cached(inode, hashval, (int)ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
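/**
 * nfsd_file_acquire - find or open a cached file for an nfsd request
 * @rqstp: the RPC request being processed
 * @fhp: file handle of the file to open
 * @may_flags: NFSD_MAY_* access flags for the open
 * @pnf: OUT: on success, points to the referenced nfsd_file
 *
 * Verify the file handle, then look the file up in the cache. On a miss a
 * new nfsd_file is allocated, hashed and opened; concurrent callers wait on
 * NFSD_FILE_PENDING until construction completes. Returns nfs_ok and sets
 * *pnf on success, otherwise an nfserr status (typically nfserr_jukebox on
 * allocation or open failure).
 */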
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned int may_flags, struct nfsd_file **pnf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct net *net = SVC_NET(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct nfsd_file *nf, *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) unsigned int hashval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) bool retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* FIXME: skip this if fh_dentry is already set? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) status = fh_verify(rqstp, fhp, S_IFREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) may_flags|NFSD_MAY_OWNER_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (status != nfs_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) inode = d_inode(fhp->fh_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) goto wait_for_construction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) new = nfsd_file_alloc(inode, may_flags, hashval, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (!new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) NULL, nfserr_jukebox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (nf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) goto open_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) nfsd_file_slab_free(&new->nf_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) wait_for_construction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* Did construction of this file fail? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (!retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) nfsd_file_put_noref(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) this_cpu_inc(nfsd_file_cache_hits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) bool write = (may_flags & NFSD_MAY_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) status = nfserrno(nfsd_open_break_lease(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) file_inode(nf->nf_file), may_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (status == nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) clear_bit(NFSD_FILE_BREAK_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) &nf->nf_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (status == nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) *pnf = nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) nf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) open_file:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) nf = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* Take reference for the hashtable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) refcount_inc(&nf->nf_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) list_lru_add(&nfsd_file_lru, &nf->nf_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ++nfsd_file_hashtbl[hashval].nfb_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) nfsd_file_hashtbl[hashval].nfb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) nfsd_file_gc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) nf->nf_mark = nfsd_file_mark_find_or_create(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (nf->nf_mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) status = nfsd_open_verified(rqstp, fhp, S_IFREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) may_flags, &nf->nf_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * If construction failed, or we raced with a call to unlink(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * then unhash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (status != nfs_ok || inode->i_nlink == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) bool do_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) do_free = nfsd_file_unhash(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (do_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) nfsd_file_put_noref(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
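
/*
 * A rough calling pattern, as seen from an NFS operation (the surrounding
 * locals and the NFSD_MAY_WRITE flag here are illustrative only):
 *
 *	struct nfsd_file *nf;
 *	__be32 status;
 *
 *	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_WRITE, &nf);
 *	if (status != nfs_ok)
 *		return status;
 *	... use nf->nf_file for the I/O ...
 *	nfsd_file_put(nf);
 */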
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * Note that fields may be added, removed or reordered in the future. Programs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * scraping this file for info should test the labels to ensure they're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * getting the correct field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) */
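/*
 * A minimal sketch of the output, with illustrative values only:
 *
 *	total entries: 4096
 *	longest chain: 3
 *	cache hits: 152340
 */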
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) unsigned int i, count = 0, longest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) unsigned long hits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * No need for spinlocks here since we're not terribly interested in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * accuracy. We do take the nfsd_mutex simply to ensure that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * don't end up racing with server shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) mutex_lock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (nfsd_file_hashtbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) count += nfsd_file_hashtbl[i].nfb_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) mutex_unlock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) hits += per_cpu(nfsd_file_cache_hits, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) seq_printf(m, "total entries: %u\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) seq_printf(m, "longest chain: %u\n", longest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) seq_printf(m, "cache hits: %lu\n", hits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
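/*
 * Open handler for the filecache stats file; single_open() makes the
 * seq_file emit the report generated by nfsd_file_cache_stats_show().
 */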
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return single_open(file, nfsd_file_cache_stats_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }