// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * User extended attribute client side cache functions.
 *
 * Author: Frank van der Linden <fllinden@amazon.com>
 */
#include <linux/errno.h>
#include <linux/nfs_fs.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
#include <uapi/linux/xattr.h>

#include "nfs4_fs.h"
#include "internal.h"

/*
 * User extended attributes client side caching is implemented by having
 * a cache structure attached to NFS inodes. This structure is allocated
 * when needed, and freed when the cache is zapped.
 *
 * The cache structure contains a hash table of entries, and a pointer
 * to a special-cased entry for the listxattr cache.
 *
 * Accessing and allocating / freeing the caches is done via reference
 * counting. The cache entries use a similar refcounting scheme.
 *
 * This makes freeing a cache, both from the shrinker and from the
 * zap cache path, easy. It also means that, in current use cases,
 * the large majority of inodes will not waste any memory, as they
 * will never have any user extended attributes assigned to them.
 *
 * Attribute entries are hashed into a simple hash table. They are
 * also part of an LRU.
 *
 * There are three shrinkers.
 *
 * Two shrinkers deal with the cache entries themselves: one for
 * large entries (> PAGE_SIZE), and one for smaller entries. The
 * shrinker for the larger entries works more aggressively than
 * the one for the smaller entries.
 *
 * The other shrinker frees the cache structures themselves.
 */

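/*
 * Illustrative lookup/update flow (an informal sketch, not a separate
 * API contract): a getxattr of, say, "user.foo" calls
 * nfs4_xattr_cache_get(inode, "user.foo", buf, buflen). On a miss
 * (-ENOENT), the caller fetches the attribute from the server and then
 * calls nfs4_xattr_cache_add() to populate the cache, which also
 * invalidates the cached listxattr result.
 */
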
/*
 * 64 buckets is a good default. There is likely no reasonable
 * workload that needs more than 64 user extended attributes.
 * You can certainly add a lot more - but you get what you ask for
 * in those circumstances.
 */
#define NFS4_XATTR_HASH_SIZE	64

#define NFSDBG_FACILITY	NFSDBG_XATTRCACHE

struct nfs4_xattr_cache;
struct nfs4_xattr_entry;

struct nfs4_xattr_bucket {
	spinlock_t lock;
	struct hlist_head hlist;
	struct nfs4_xattr_cache *cache;
	bool draining;
};

struct nfs4_xattr_cache {
	struct kref ref;
	struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
	struct list_head lru;
	struct list_head dispose;
	atomic_long_t nent;
	spinlock_t listxattr_lock;
	struct inode *inode;
	struct nfs4_xattr_entry *listxattr;
};

struct nfs4_xattr_entry {
	struct kref ref;
	struct hlist_node hnode;
	struct list_head lru;
	struct list_head dispose;
	char *xattr_name;
	void *xattr_value;
	size_t xattr_size;
	struct nfs4_xattr_bucket *bucket;
	uint32_t flags;
};

#define NFS4_XATTR_ENTRY_EXTVAL	0x0001

/*
 * LRU list of NFS inodes that have xattr caches.
 */
static struct list_lru nfs4_xattr_cache_lru;
static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;

static struct kmem_cache *nfs4_xattr_cache_cachep;

/*
 * Hashing helper functions.
 */
static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&cache->buckets[i].hlist);
		spin_lock_init(&cache->buckets[i].lock);
		cache->buckets[i].cache = cache;
		cache->buckets[i].draining = false;
	}
}

/*
 * Locking order:
 * 1. inode i_lock or bucket lock
 * 2. list_lru lock (taken by list_lru_* functions)
 */

/*
 * Wrapper functions to add a cache entry to, or remove it from, the
 * right LRU.
 */
static bool
nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
		&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_add(lru, &entry->lru);
}

static bool
nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
		&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_del(lru, &entry->lru);
}

/*
 * This function allocates cache entries. They are the normal
 * extended attribute name/value pairs, but may also be a listxattr
 * cache. Those allocations use the same entry structure, so that
 * they can be treated as one by the memory shrinker.
 *
 * xattr cache entries are allocated together with names. If the
 * value fits into one page with the entry structure and the name,
 * it will also be part of the same allocation (kmalloc). This is
 * expected to be the vast majority of cases. Larger allocations
 * have a value pointer that is allocated separately by kvmalloc.
 *
 * Parameters:
 *
 * @name:  Name of the extended attribute. NULL for listxattr cache
 *         entry.
 * @value: Value of attribute, or listxattr cache. NULL if the
 *         value is to be copied from pages instead.
 * @pages: Pages to copy the value from, if not NULL. Passed in to
 *         make it easier to copy the value after an RPC, even if
 *         the value will not be passed up to the application (e.g.
 *         for a 'query' getxattr with a NULL buffer).
 * @len:   Length of the value. Can be 0 for zero-length attributes.
 *         @value and @pages will be NULL if @len is 0.
 */
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
		       struct page **pages, size_t len)
{
	struct nfs4_xattr_entry *entry;
	void *valp;
	char *namep;
	size_t alloclen, slen;
	char *buf;
	uint32_t flags;

	BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) +
	    XATTR_NAME_MAX + 1 > PAGE_SIZE);

	alloclen = sizeof(struct nfs4_xattr_entry);
	if (name != NULL) {
		slen = strlen(name) + 1;
		alloclen += slen;
	} else
		slen = 0;

	if (alloclen + len <= PAGE_SIZE) {
		alloclen += len;
		flags = 0;
	} else {
		flags = NFS4_XATTR_ENTRY_EXTVAL;
	}

	buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (buf == NULL)
		return NULL;
	entry = (struct nfs4_xattr_entry *)buf;

	if (name != NULL) {
		namep = buf + sizeof(struct nfs4_xattr_entry);
		memcpy(namep, name, slen);
	} else {
		namep = NULL;
	}

	if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
		valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS);
		if (valp == NULL) {
			kfree(buf);
			return NULL;
		}
	} else if (len != 0) {
		valp = buf + sizeof(struct nfs4_xattr_entry) + slen;
	} else
		valp = NULL;

	if (valp != NULL) {
		if (value != NULL)
			memcpy(valp, value, len);
		else
			_copy_from_pages(valp, pages, 0, len);
	}

	entry->flags = flags;
	entry->xattr_value = valp;
	kref_init(&entry->ref);
	entry->xattr_name = namep;
	entry->xattr_size = len;
	entry->bucket = NULL;
	INIT_LIST_HEAD(&entry->lru);
	INIT_LIST_HEAD(&entry->dispose);
	INIT_HLIST_NODE(&entry->hnode);

	return entry;
}

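/*
 * Free an entry's memory, including a separately allocated value
 * buffer for large (EXTVAL) entries.
 */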
static void
nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
	if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
		kvfree(entry->xattr_value);
	kfree(entry);
}

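/*
 * kref release callback for entries. An entry must already have been
 * taken off its LRU before the last reference is dropped.
 */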
static void
nfs4_xattr_free_entry_cb(struct kref *kref)
{
	struct nfs4_xattr_entry *entry;

	entry = container_of(kref, struct nfs4_xattr_entry, ref);

	if (WARN_ON(!list_empty(&entry->lru)))
		return;

	nfs4_xattr_free_entry(entry);
}

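/*
 * kref release callback for a cache structure. By this point, all hash
 * buckets are expected to be empty (see nfs4_xattr_discard_cache).
 */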
static void
nfs4_xattr_free_cache_cb(struct kref *kref)
{
	struct nfs4_xattr_cache *cache;
	int i;

	cache = container_of(kref, struct nfs4_xattr_cache, ref);

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
			return;
		cache->buckets[i].draining = false;
	}

	cache->listxattr = NULL;

	kmem_cache_free(nfs4_xattr_cache_cachep, cache);
}

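/*
 * Allocate a new, empty cache structure, holding a single reference.
 */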
static struct nfs4_xattr_cache *
nfs4_xattr_alloc_cache(void)
{
	struct nfs4_xattr_cache *cache;

	cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
	    GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (cache == NULL)
		return NULL;

	kref_init(&cache->ref);
	atomic_long_set(&cache->nent, 0);

	return cache;
}

/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added to what is now a stale cache.
 */
static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
			 struct nfs4_xattr_entry *new)
{
	struct nfs4_xattr_entry *old;
	int ret = 1;

	spin_lock(&cache->listxattr_lock);

	old = cache->listxattr;

	if (old == ERR_PTR(-ESTALE)) {
		ret = 0;
		goto out;
	}

	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);

	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
out:
	spin_unlock(&cache->listxattr_lock);

	return ret;
}

/*
 * Unlink a cache from its parent inode, clearing out an invalid
 * cache. Must be called with i_lock held.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_cache_unlink(struct inode *inode)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *oldcache;

	nfsi = NFS_I(inode);

	oldcache = nfsi->xattr_cache;
	if (oldcache != NULL) {
		list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
		oldcache->inode = NULL;
	}
	nfsi->xattr_cache = NULL;
	nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;

	return oldcache;
}

/*
 * Discard a cache. Called by get_cache() if there was an old,
 * invalid cache. Can also be called from a shrinker callback.
 *
 * The cache is dead, it has already been unlinked from its inode,
 * and no longer appears on the cache LRU list.
 *
 * Mark all buckets as draining, so that no new entries are added. This
 * could still happen in the unlikely, but possible case that another
 * thread had grabbed a reference before it was unlinked from the inode,
 * and is still holding it for an add operation.
 *
 * Remove all entries from the LRU lists, so that there is no longer
 * any way to 'find' this cache. Then, remove the entries from the hash
 * table.
 *
 * At that point, the cache will remain empty and can be freed when the final
 * reference drops, which is very likely the kref_put at the end of
 * this function, or the one called immediately afterwards in the
 * shrinker callback.
 */
static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
	unsigned int i;
	struct nfs4_xattr_entry *entry;
	struct nfs4_xattr_bucket *bucket;
	struct hlist_node *n;

	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);
	}

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Get a referenced copy of the cache structure. Avoid doing allocs
 * while holding i_lock, which means that we do some optimistic
 * allocation, and might have to free the result in rare cases.
 *
 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
 * and acts accordingly, replacing the cache when needed. For the read case
 * (!add), this means that the caller must make sure that the cache
 * is valid before calling this function. getxattr and listxattr call
 * revalidate_inode to do this. The attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * call.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_get_cache(struct inode *inode, int add)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *cache, *oldcache, *newcache;

	nfsi = NFS_I(inode);

	cache = oldcache = NULL;

	spin_lock(&inode->i_lock);

	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
		oldcache = nfs4_xattr_cache_unlink(inode);
	else
		cache = nfsi->xattr_cache;

	if (cache != NULL)
		kref_get(&cache->ref);

	spin_unlock(&inode->i_lock);

	if (add && cache == NULL) {
		newcache = NULL;

		cache = nfs4_xattr_alloc_cache();
		if (cache == NULL)
			goto out;

		spin_lock(&inode->i_lock);
		if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
			/*
			 * The cache was invalidated again. Give up,
			 * since what we want to enter is now likely
			 * outdated anyway.
			 */
			spin_unlock(&inode->i_lock);
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = NULL;
			goto out;
		}

		/*
		 * Check if someone beat us to it.
		 */
		if (nfsi->xattr_cache != NULL) {
			newcache = nfsi->xattr_cache;
			kref_get(&newcache->ref);
		} else {
			kref_get(&cache->ref);
			nfsi->xattr_cache = cache;
			cache->inode = inode;
			list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
		}

		spin_unlock(&inode->i_lock);

		/*
		 * If there was a race, throw away the cache we just
		 * allocated, and use the new one allocated by someone
		 * else.
		 */
		if (newcache != NULL) {
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = newcache;
		}
	}

out:
	/*
	 * Discard the now orphaned old cache.
	 */
	if (oldcache != NULL)
		nfs4_xattr_discard_cache(oldcache);

	return cache;
}

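/*
 * Map an xattr name to its hash bucket. The mask works because
 * NFS4_XATTR_HASH_SIZE (the number of buckets) is a power of two.
 */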
static inline struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
	return &cache->buckets[jhash(name, strlen(name), 0) &
	    (ARRAY_SIZE(cache->buckets) - 1)];
}

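/*
 * Look up an entry by name within a bucket. The caller must hold the
 * bucket lock; no reference is taken on the returned entry.
 */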
static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
	struct nfs4_xattr_entry *entry;

	entry = NULL;

	hlist_for_each_entry(entry, &bucket->hlist, hnode) {
		if (!strcmp(entry->xattr_name, name))
			break;
	}

	return entry;
}

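/*
 * Insert an entry into the cache, replacing any existing entry with
 * the same name. Returns 0 without adding the entry if the bucket is
 * being drained, 1 on success.
 */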
static int
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
		    struct nfs4_xattr_entry *entry)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *oldentry = NULL;
	int ret = 1;

	bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
	entry->bucket = bucket;

	spin_lock(&bucket->lock);

	if (bucket->draining) {
		ret = 0;
		goto out;
	}

	oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
	if (oldentry != NULL) {
		hlist_del_init(&oldentry->hnode);
		nfs4_xattr_entry_lru_del(oldentry);
	} else {
		atomic_long_inc(&cache->nent);
	}

	hlist_add_head(&entry->hnode, &bucket->hlist);
	nfs4_xattr_entry_lru_add(entry);

out:
	spin_unlock(&bucket->lock);

	if (oldentry != NULL)
		kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);

	return ret;
}

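/*
 * Remove the entry with the given name, if present, and drop its
 * hash table reference.
 */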
static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL) {
		hlist_del_init(&entry->hnode);
		nfs4_xattr_entry_lru_del(entry);
		atomic_long_dec(&cache->nent);
	}

	spin_unlock(&bucket->lock);

	if (entry != NULL)
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}

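/*
 * Find an entry by name and return it with an extra reference held,
 * or NULL if it is not cached.
 */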
static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL)
		kref_get(&entry->ref);

	spin_unlock(&bucket->lock);

	return entry;
}

/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
			 ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	ret = 0;
	entry = nfs4_xattr_hash_find(cache, name);

	if (entry != NULL) {
		dprintk("%s: cache hit '%s', len %lu\n", __func__,
		    entry->xattr_name, (unsigned long)entry->xattr_size);
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (buflen < entry->xattr_size)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	} else {
		dprintk("%s: cache miss '%s'\n", __func__, name);
		ret = -ENOENT;
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Retrieve the cached listxattr result, if any.
 */
ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	spin_lock(&cache->listxattr_lock);

	entry = cache->listxattr;

	if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (entry->xattr_size > buflen)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
	} else {
		ret = -ENOENT;
	}

	spin_unlock(&cache->listxattr_lock);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Add an xattr to the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
			  const char *buf, struct page **pages, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	dprintk("%s: add '%s' len %lu\n", __func__,
	    name, (unsigned long)buflen);

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
	if (entry == NULL)
		goto out;

	(void)nfs4_xattr_set_listcache(cache, NULL);

	if (!nfs4_xattr_hash_add(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Remove an xattr from the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
	struct nfs4_xattr_cache *cache;

	dprintk("%s: remove '%s'\n", __func__, name);

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return;

	(void)nfs4_xattr_set_listcache(cache, NULL);
	nfs4_xattr_hash_remove(cache, name);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Cache listxattr output, replacing any possible old one.
 */
void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
			       ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
	if (entry == NULL)
		goto out;

	/*
	 * This is just there to be able to get to bucket->cache,
	 * which is obviously the same for all buckets, so just
	 * use bucket 0.
	 */
	entry->bucket = &cache->buckets[0];

	if (!nfs4_xattr_set_listcache(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Zap the entire cache. Called when an inode is evicted.
 */
void nfs4_xattr_cache_zap(struct inode *inode)
{
	struct nfs4_xattr_cache *oldcache;

	spin_lock(&inode->i_lock);
	oldcache = nfs4_xattr_cache_unlink(inode);
	spin_unlock(&inode->i_lock);

	if (oldcache)
		nfs4_xattr_discard_cache(oldcache);
}

/*
 * The entry LRU is shrunk more aggressively than the cache LRU,
 * by setting @seeks to 1.
 *
 * Cache structures are freed only when they've become empty, after
 * pruning all but one entry.
 */

static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfs4_xattr_cache_shrinker = {
	.count_objects = nfs4_xattr_cache_count,
	.scan_objects = nfs4_xattr_cache_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = 1,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};

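/*
 * list_lru walk callback for the cache shrinker: isolate a cache that
 * holds at most one entry, unlink it from its inode, and put it on the
 * dispose list for nfs4_xattr_cache_scan() to discard.
 */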
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static enum lru_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) cache_lru_isolate(struct list_head *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct list_head *dispose = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct nfs4_xattr_cache *cache = container_of(item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct nfs4_xattr_cache, lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
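	/*
	 * Leave caches that still hold more than one entry to the entry
	 * shrinkers; only (nearly) empty caches are torn down here.
	 */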
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (atomic_long_read(&cache->nent) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return LRU_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * If a cache structure is on the LRU list, we know that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * its inode is valid. Try to lock it to break the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * Since we're inverting the lock order here, only try.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) inode = cache->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!spin_trylock(&inode->i_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return LRU_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
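	/*
	 * Pin the cache while it sits on the dispose list; the matching
	 * kref_put() is done in nfs4_xattr_cache_scan().
	 */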
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) kref_get(&cache->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) cache->inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) NFS_I(inode)->xattr_cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) list_lru_isolate(lru, &cache->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) list_add_tail(&cache->dispose, dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return LRU_REMOVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
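/*
 * Shrinker scan callback for cache structures: walk the cache LRU,
 * then discard each isolated cache and drop the reference taken in
 * cache_lru_isolate().
 */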
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) LIST_HEAD(dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) unsigned long freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct nfs4_xattr_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) cache_lru_isolate, &dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) while (!list_empty(&dispose)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) list_del_init(&cache->dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) nfs4_xattr_discard_cache(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) count = list_lru_shrink_count(&nfs4_xattr_cache_lru, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return vfs_pressure_ratio(count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
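/*
 * list_lru walk callback shared by the regular and large entry LRUs:
 * unhook the entry from its hash bucket (or from the cache's listxattr
 * pointer) and collect it on the dispose list for nfs4_xattr_entry_scan().
 */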
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) static enum lru_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) entry_lru_isolate(struct list_head *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct list_head *dispose = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct nfs4_xattr_bucket *bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct nfs4_xattr_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct nfs4_xattr_entry *entry = container_of(item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct nfs4_xattr_entry, lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) bucket = entry->bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) cache = bucket->cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Unhook the entry from its parent (either a cache bucket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * or a cache structure if it's a listxattr buf), so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)  * it's no longer found. Then add it to the dispose list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * to be freed later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)  * In both cases, we're inverting the lock order, so use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * trylock and skip the entry if we can't get the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (entry->xattr_name != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* Regular cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!spin_trylock(&bucket->lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return LRU_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) kref_get(&entry->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) hlist_del_init(&entry->hnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) atomic_long_dec(&cache->nent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) list_lru_isolate(lru, &entry->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) spin_unlock(&bucket->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* Listxattr cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (!spin_trylock(&cache->listxattr_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return LRU_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) kref_get(&entry->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) cache->listxattr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) list_lru_isolate(lru, &entry->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) spin_unlock(&cache->listxattr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) list_add_tail(&entry->dispose, dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return LRU_REMOVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) LIST_HEAD(dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) unsigned long freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct nfs4_xattr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct list_lru *lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
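	/*
	 * Both entry shrinkers share this callback; pick the LRU that
	 * belongs to the shrinker that was invoked.
	 */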
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) while (!list_empty(&dispose)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) list_del_init(&entry->dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * Drop two references: the one that we just grabbed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * in entry_lru_isolate, and the one that was set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * when the entry was first allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct list_lru *lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) count = list_lru_shrink_count(lru, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return vfs_pressure_ratio(count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
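/*
 * Slab constructor: runs when the allocator constructs an
 * nfs4_xattr_cache object, not on every allocation, so objects are
 * always handed out with their locks, lists and counters initialized.
 */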
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static void nfs4_xattr_cache_init_once(void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) spin_lock_init(&cache->listxattr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) atomic_long_set(&cache->nent, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) nfs4_xattr_hash_init(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) cache->listxattr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) INIT_LIST_HEAD(&cache->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) INIT_LIST_HEAD(&cache->dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
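/*
 * Set up the slab cache, the three LRUs and their shrinkers from the
 * NFSv4 module init path. On failure, the goto ladder below unwinds
 * whatever had already been set up.
 */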
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) int __init nfs4_xattr_cache_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) sizeof(struct nfs4_xattr_cache), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) nfs4_xattr_cache_init_once);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (nfs4_xattr_cache_cachep == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
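	/*
	 * The LRUs are memcg aware and paired with their shrinkers, so
	 * that memcg-targeted reclaim only walks that cgroup's objects.
	 */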
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) &nfs4_xattr_large_entry_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) goto out4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) &nfs4_xattr_entry_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) &nfs4_xattr_cache_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ret = register_shrinker(&nfs4_xattr_cache_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = register_shrinker(&nfs4_xattr_entry_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unregister_shrinker(&nfs4_xattr_entry_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) unregister_shrinker(&nfs4_xattr_cache_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) list_lru_destroy(&nfs4_xattr_cache_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) list_lru_destroy(&nfs4_xattr_entry_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) list_lru_destroy(&nfs4_xattr_large_entry_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) out4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) kmem_cache_destroy(nfs4_xattr_cache_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
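/*
 * Tear down in roughly the reverse order of nfs4_xattr_cache_init():
 * unregister the shrinkers first, then destroy the LRUs they walk,
 * and finally the slab cache.
 */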
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) void nfs4_xattr_cache_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) unregister_shrinker(&nfs4_xattr_entry_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) unregister_shrinker(&nfs4_xattr_cache_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) list_lru_destroy(&nfs4_xattr_large_entry_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) list_lru_destroy(&nfs4_xattr_entry_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) list_lru_destroy(&nfs4_xattr_cache_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) kmem_cache_destroy(nfs4_xattr_cache_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }